From c19a93f5bf7d159f1572f1ad236caf1bf5874d3e Mon Sep 17 00:00:00 2001
From: Xin Li <7219519+xin-li-67@users.noreply.github.com>
Date: Fri, 21 Apr 2023 19:36:58 +0800
Subject: [PATCH] [MMSIG-77] Translate demo/docs (#2228)
---
README.md | 2 +-
README_CN.md | 2 +-
configs/animal_2d_keypoint/README.md | 2 +-
configs/body_2d_keypoint/README.md | 2 +-
configs/body_3d_keypoint/README.md | 2 +-
configs/face_2d_keypoint/README.md | 2 +-
configs/hand_2d_keypoint/README.md | 2 +-
configs/hand_gesture/README.md | 2 +-
configs/wholebody_2d_keypoint/README.md | 2 +-
demo/docs/{ => en}/2d_animal_demo.md | 2 +-
demo/docs/{ => en}/2d_face_demo.md | 6 +-
demo/docs/{ => en}/2d_hand_demo.md | 3 +-
demo/docs/{ => en}/2d_human_pose_demo.md | 0
demo/docs/{ => en}/2d_wholebody_pose_demo.md | 0
demo/docs/{ => en}/mmdet_modelzoo.md | 6 +-
demo/docs/{ => en}/webcam_api_demo.md | 0
demo/docs/zh_cn/2d_animal_demo.md | 124 ++++++++++++++++
demo/docs/zh_cn/2d_face_demo.md | 88 +++++++++++
demo/docs/zh_cn/2d_hand_demo.md | 101 +++++++++++++
demo/docs/zh_cn/2d_human_pose_demo.md | 146 +++++++++++++++++++
demo/docs/zh_cn/2d_wholebody_pose_demo.md | 108 ++++++++++++++
demo/docs/zh_cn/mmdet_modelzoo.md | 42 ++++++
demo/docs/zh_cn/webcam_api_demo.md | 109 ++++++++++++++
docs/en/merge_docs.sh | 26 ++--
docs/zh_cn/merge_docs.sh | 26 ++--
25 files changed, 760 insertions(+), 45 deletions(-)
rename demo/docs/{ => en}/2d_animal_demo.md (99%)
rename demo/docs/{ => en}/2d_face_demo.md (90%)
rename demo/docs/{ => en}/2d_hand_demo.md (98%)
rename demo/docs/{ => en}/2d_human_pose_demo.md (100%)
rename demo/docs/{ => en}/2d_wholebody_pose_demo.md (100%)
rename demo/docs/{ => en}/mmdet_modelzoo.md (95%)
rename demo/docs/{ => en}/webcam_api_demo.md (100%)
create mode 100644 demo/docs/zh_cn/2d_animal_demo.md
create mode 100644 demo/docs/zh_cn/2d_face_demo.md
create mode 100644 demo/docs/zh_cn/2d_hand_demo.md
create mode 100644 demo/docs/zh_cn/2d_human_pose_demo.md
create mode 100644 demo/docs/zh_cn/2d_wholebody_pose_demo.md
create mode 100644 demo/docs/zh_cn/mmdet_modelzoo.md
create mode 100644 demo/docs/zh_cn/webcam_api_demo.md
diff --git a/README.md b/README.md
index 9f685a1c83..34f9caacda 100644
--- a/README.md
+++ b/README.md
@@ -75,7 +75,7 @@ https://user-images.githubusercontent.com/15977946/124654387-0fd3c500-ded1-11eb-
- **Support diverse tasks**
We support a wide spectrum of mainstream pose analysis tasks in the current research community, including 2D multi-person human pose estimation, 2D hand pose estimation, 2D face landmark detection, 133-keypoint whole-body human pose estimation, 3D human mesh recovery, fashion landmark detection and animal pose estimation.
- See [Demo](demo/docs/) for more information.
+ See [Demo](demo/docs/en) for more information.
- **Higher efficiency and higher accuracy**
diff --git a/README_CN.md b/README_CN.md
index 09522b20ae..1b8121767d 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -72,7 +72,7 @@ https://user-images.githubusercontent.com/15977946/124654387-0fd3c500-ded1-11eb-
- **Support for diverse pose analysis tasks**
MMPose supports the mainstream pose analysis tasks that currently receive wide attention in the research community, including 2D multi-person pose estimation, 2D hand pose estimation, 2D face landmark detection, 133-keypoint whole-body human pose estimation, 3D human mesh recovery, fashion landmark detection, and animal pose estimation.
- See the [demo docs](demo/docs/) for details.
+ See the [demo docs](demo/docs/zh_cn/) for details.
- **Higher accuracy and faster speed**
diff --git a/configs/animal_2d_keypoint/README.md b/configs/animal_2d_keypoint/README.md
index f1e38cb6ba..efcc3841a5 100644
--- a/configs/animal_2d_keypoint/README.md
+++ b/configs/animal_2d_keypoint/README.md
@@ -9,7 +9,7 @@ Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_animal_keypoint.md) to
## Demo
-Please follow [DEMO](/demo/docs/2d_animal_demo.md) to generate fancy demos.
+Please follow [DEMO](/demo/docs/en/2d_animal_demo.md) to generate fancy demos.
diff --git a/configs/body_2d_keypoint/README.md b/configs/body_2d_keypoint/README.md
index 468f960754..d005d3fed7 100644
--- a/configs/body_2d_keypoint/README.md
+++ b/configs/body_2d_keypoint/README.md
@@ -14,7 +14,7 @@ Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_body_keypoint.md) to pr
## Demo
-Please follow [Demo](/demo/docs/2d_human_pose_demo.md#2d-human-pose-demo) to run demos.
+Please follow [Demo](/demo/docs/en/2d_human_pose_demo.md#2d-human-pose-demo) to run demos.
diff --git a/configs/body_3d_keypoint/README.md b/configs/body_3d_keypoint/README.md
index 698e970cb3..b67f7ce7ac 100644
--- a/configs/body_3d_keypoint/README.md
+++ b/configs/body_3d_keypoint/README.md
@@ -8,6 +8,6 @@ Please follow [DATA Preparation](/docs/en/dataset_zoo/3d_body_keypoint.md) to pr
## Demo
-Please follow [Demo](/demo/docs/3d_human_pose_demo.md) to run demos.
+Please follow [Demo](/demo/docs/en/3d_human_pose_demo.md) to run demos.
diff --git a/configs/face_2d_keypoint/README.md b/configs/face_2d_keypoint/README.md
index b77c632b00..9f9370a754 100644
--- a/configs/face_2d_keypoint/README.md
+++ b/configs/face_2d_keypoint/README.md
@@ -11,6 +11,6 @@ Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_face_keypoint.md) to pr
## Demo
-Please follow [Demo](/demo/docs/2d_face_demo.md) to run demos.
+Please follow [Demo](/demo/docs/en/2d_face_demo.md) to run demos.
diff --git a/configs/hand_2d_keypoint/README.md b/configs/hand_2d_keypoint/README.md
index cbe39fd39a..6f7758290e 100644
--- a/configs/hand_2d_keypoint/README.md
+++ b/configs/hand_2d_keypoint/README.md
@@ -11,7 +11,7 @@ Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_keypoint.md) to pr
## Demo
-Please follow [Demo](/demo/docs/2d_hand_demo.md) to run demos.
+Please follow [Demo](/demo/docs/en/2d_hand_demo.md) to run demos.
diff --git a/configs/hand_gesture/README.md b/configs/hand_gesture/README.md
index 1e91904116..7cc5bb323b 100644
--- a/configs/hand_gesture/README.md
+++ b/configs/hand_gesture/README.md
@@ -8,6 +8,6 @@ Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_hand_gesture.md) to pre
## Demo
-Please follow [Demo](/demo/docs/gesture_recognition_demo.md) to run the demo.
+Please follow [Demo](/demo/docs/en/gesture_recognition_demo.md) to run the demo.
diff --git a/configs/wholebody_2d_keypoint/README.md b/configs/wholebody_2d_keypoint/README.md
index 2b5f8812bf..362a6a8976 100644
--- a/configs/wholebody_2d_keypoint/README.md
+++ b/configs/wholebody_2d_keypoint/README.md
@@ -14,6 +14,6 @@ Please follow [DATA Preparation](/docs/en/dataset_zoo/2d_wholebody_keypoint.md)
## Demo
-Please follow [Demo](/demo/docs/2d_wholebody_pose_demo.md) to run demos.
+Please follow [Demo](/demo/docs/en/2d_wholebody_pose_demo.md) to run demos.
diff --git a/demo/docs/2d_animal_demo.md b/demo/docs/en/2d_animal_demo.md
similarity index 99%
rename from demo/docs/2d_animal_demo.md
rename to demo/docs/en/2d_animal_demo.md
index 997f182087..aa9970395b 100644
--- a/demo/docs/2d_animal_demo.md
+++ b/demo/docs/en/2d_animal_demo.md
@@ -39,7 +39,7 @@ The argument `--det-cat-id=15` selects detected bounding boxes with label 'cat'
**COCO-animals**
In the COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe').
-For other animals, we have also provided some pre-trained animal detection models (1-class models). Supported models can be found in [detection model zoo](/demo/docs/mmdet_modelzoo.md).
+For other animals, we have also provided some pre-trained animal detection models (1-class models). Supported models can be found in [detection model zoo](/demo/docs/en/mmdet_modelzoo.md).
To save visualized results on disk:
diff --git a/demo/docs/2d_face_demo.md b/demo/docs/en/2d_face_demo.md
similarity index 90%
rename from demo/docs/2d_face_demo.md
rename to demo/docs/en/2d_face_demo.md
index e1940cd243..9c60f68487 100644
--- a/demo/docs/2d_face_demo.md
+++ b/demo/docs/en/2d_face_demo.md
@@ -1,8 +1,8 @@
## 2D Face Keypoint Demo
-We provide a demo script to test a single image or video with hand detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0.
+We provide a demo script to test a single image or video with face detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0.
-**Face Box Model Preparation:** The pre-trained face box estimation model can be found in [mmdet model zoo](/demo/docs/mmdet_modelzoo.md).
+**Face Bounding Box Model Preparation:** The pre-trained face detection model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#face-bounding-box-detection-models).
### 2D Face Image Demo
@@ -98,4 +98,4 @@ In addition, the Inferencer supports saving predicted poses. For more informatio
### Speed Up Inference
-For 2D face keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in [aflw_hrnetv2](../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py#90).
+For 2D face keypoint estimation models, try to edit the config file. For example, set `model.test_cfg.flip_test=False` in line 90 of [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py).
diff --git a/demo/docs/2d_hand_demo.md b/demo/docs/en/2d_hand_demo.md
similarity index 98%
rename from demo/docs/2d_hand_demo.md
rename to demo/docs/en/2d_hand_demo.md
index 63f35de5c6..f47b3695e3 100644
--- a/demo/docs/2d_hand_demo.md
+++ b/demo/docs/en/2d_hand_demo.md
@@ -2,7 +2,7 @@
We provide a demo script to test a single image or video with hand detectors and top-down pose estimators. Assume that you have already installed [mmdet](https://github.com/open-mmlab/mmdetection) with version >= 3.0.
-**Hand Box Model Preparation:** The pre-trained hand box estimation model can be found in [mmdet model zoo](/demo/docs/mmdet_modelzoo.md).
+**Hand Bounding Box Model Preparation:** The pre-trained hand detection model can be found in [mmdet model zoo](/demo/docs/en/mmdet_modelzoo.md#hand-bounding-box-detection-models).
### 2D Hand Image Demo
@@ -14,7 +14,6 @@ python demo/topdown_demo_with_mmdet.py \
[--show] [--device ${GPU_ID or CPU}] [--save-predictions] \
[--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \
[--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}]
-
```
The pre-trained hand pose estimation model can be downloaded from [model zoo](https://mmpose.readthedocs.io/en/latest/model_zoo/hand_2d_keypoint.html).
diff --git a/demo/docs/2d_human_pose_demo.md b/demo/docs/en/2d_human_pose_demo.md
similarity index 100%
rename from demo/docs/2d_human_pose_demo.md
rename to demo/docs/en/2d_human_pose_demo.md
diff --git a/demo/docs/2d_wholebody_pose_demo.md b/demo/docs/en/2d_wholebody_pose_demo.md
similarity index 100%
rename from demo/docs/2d_wholebody_pose_demo.md
rename to demo/docs/en/2d_wholebody_pose_demo.md
diff --git a/demo/docs/mmdet_modelzoo.md b/demo/docs/en/mmdet_modelzoo.md
similarity index 95%
rename from demo/docs/mmdet_modelzoo.md
rename to demo/docs/en/mmdet_modelzoo.md
index a50be168a5..5383cb953f 100644
--- a/demo/docs/mmdet_modelzoo.md
+++ b/demo/docs/en/mmdet_modelzoo.md
@@ -7,7 +7,7 @@ MMDetection provides 80-class COCO-pretrained models, which already includes the
### Hand Bounding Box Detection Models
-For hand bounding box detection, we simply train our hand box models on onehand10k dataset using MMDetection.
+For hand bounding box detection, we simply train our hand box models on OneHand10K dataset using MMDetection.
#### Hand detection results on OneHand10K test set
@@ -19,7 +19,7 @@ For hand bounding box detection, we simply train our hand box models on onehand1
For face bounding box detection, we train a YOLOX detector on COCO-face data using MMDetection.
-#### Hand detection results on OneHand10K test set
+#### Face detection results on COCO-face test set
| Arch | Box AP | ckpt |
| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: |
@@ -29,8 +29,6 @@ For face bounding box detection, we train a YOLOX detector on COCO-face data usi
#### COCO animals
-
-
In COCO dataset, there are 80 object categories, including 10 common `animal` categories (14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe')
For animals in these categories, please download detection models from the [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html).
diff --git a/demo/docs/webcam_api_demo.md b/demo/docs/en/webcam_api_demo.md
similarity index 100%
rename from demo/docs/webcam_api_demo.md
rename to demo/docs/en/webcam_api_demo.md
diff --git a/demo/docs/zh_cn/2d_animal_demo.md b/demo/docs/zh_cn/2d_animal_demo.md
new file mode 100644
index 0000000000..e49f292f56
--- /dev/null
+++ b/demo/docs/zh_cn/2d_animal_demo.md
@@ -0,0 +1,124 @@
+## 2D Animal Pose Demo
+
+This series of documents shows how to run basic inference demos with the provided scripts. In this section, we demonstrate how to perform 2D animal pose estimation on a single image or video with top-down estimators. Please make sure you have installed [MMDetection](https://github.com/open-mmlab/mmdetection) version 3.0 or higher.
+
+### 2D Animal Image Pose Estimation
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \
+ ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --input ${INPUT_PATH} --det-cat-id ${DET_CAT_ID} \
+ [--show] [--output-root ${OUTPUT_DIR}] [--save-predictions] \
+ [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \
+ [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}] \
+ [--device ${GPU_ID or CPU}]
+```
+
+Pre-trained keypoint estimation models can be found in the [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/animal_2d_keypoint.html).
+
+Here we use the [animalpose model](https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth) as an example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+ https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+ --input tests/data/animalpose/ca110.jpeg \
+ --show --draw-heatmap --det-cat-id=15
+```
+
+The visualization result is as follows:
+
+If a heatmap-based model is used and `--draw-heatmap` is set, the predicted heatmaps are visualized together with the keypoints.
+
+The argument `--det-cat-id=15` restricts the detector to the `cat` category, following the COCO dataset labels.
+
+**COCO dataset animals**
+
+The COCO dataset contains 80 object categories, 10 of which are common animal categories:
+
+(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe')
+
+For other animal species, we also provide some pre-trained animal detection models (1-class models); they can be downloaded from the [detection model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md).
+
+To save the visualized results locally, use the following command:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+ https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+ --input tests/data/animalpose/ca110.jpeg \
+ --output-root vis_results --draw-heatmap --det-cat-id=15
+```
+
+To save the prediction results locally, use `--save-predictions`:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+ https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+ --input tests/data/animalpose/ca110.jpeg \
+ --output-root vis_results --save-predictions --draw-heatmap --det-cat-id=15
+```
+
+To run the demo on CPU only:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+ https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+ --input tests/data/animalpose/ca110.jpeg \
+ --show --draw-heatmap --det-cat-id=15 --device cpu
+```
+
+### 2D Animal Video Pose Estimation
+
+Videos use the same interface as images. The only difference is that during video inference, `${INPUT_PATH}` can be either the path of a local video file or the **URL** of a video file.
+
+For example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+ https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+ --input demo/resources/<demo_dog.mp4> \
+ --output-root vis_results --draw-heatmap --det-cat-id=16
+```
+
+
+
+The video can be downloaded from [Google Drive](https://drive.google.com/file/d/18d8K3wuUpKiDFHvOx0mh1TEwYwpOc5UO/view?usp=sharing).
+
+### 2D Animal Pose Estimation with Inferencer
+
+The Inferencer provides a more convenient inference interface: users can skip the model config file and checkpoint path and use a model alias instead, and it supports a variety of inputs, including image paths, video paths, image folder paths, and webcams. For example:
+
+```shell
+python demo/inferencer_demo.py tests/data/ap10k \
+ --pose2d animal --vis-out-dir vis_results/ap10k
+```
+
+This command runs inference on all images under `tests/data/ap10k` and saves the visualization results to the `vis_results/ap10k` folder.
+
+
+
+The Inferencer also supports saving prediction results. More information can be found in the [Inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface).
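+
+A sketch of this, assuming `demo/inferencer_demo.py` exposes a `--pred-out-dir` option (check the linked document for the authoritative flags):
+
+```shell
+# assumed flag: --pred-out-dir writes per-image prediction files next to the visualizations
+python demo/inferencer_demo.py tests/data/ap10k \
+    --pose2d animal --vis-out-dir vis_results/ap10k \
+    --pred-out-dir vis_results/ap10k
+```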
+
+### Speed Up Inference
+
+Users can speed up inference by modifying the config file. For example:
+
+1. Set `model.test_cfg.flip_test=False`, as shown in [animalpose_hrnet-w32](../../../configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py#85).
+2. Use a faster bounding box detector from [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html); a sketch follows below.
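+
+As a sketch of point 2, the demo can reuse the lighter SSDLite detector that ships with the webcam demo (config and checkpoint URLs as referenced in `demo/webcam_cfg/human_pose.py`; expect some loss of detection accuracy compared with Faster R-CNN):
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth \
+ configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py \
+ https://download.openmmlab.com/mmpose/animal/hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth \
+ --input tests/data/animalpose/ca110.jpeg \
+ --show --draw-heatmap --det-cat-id=15
+```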
diff --git a/demo/docs/zh_cn/2d_face_demo.md b/demo/docs/zh_cn/2d_face_demo.md
new file mode 100644
index 0000000000..e8a4e550db
--- /dev/null
+++ b/demo/docs/zh_cn/2d_face_demo.md
@@ -0,0 +1,88 @@
+## 2D Face Keypoint Demo
+
+This section shows how to use the demo scripts to estimate 2D face keypoints. As before, please make sure [MMDetection](https://github.com/open-mmlab/mmdetection) version 3.0 or higher is installed in your environment.
+
+We provide a pre-trained face bounding box detection model in the [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#face-bounding-box-detection-models); users can download it from there.
+
+### 2D Face Image Keypoint Estimation
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \
+ ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \
+ [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \
+ [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \
+ [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}]
+```
+
+Pre-trained face keypoint estimation models can be found in the [model zoo](https://mmpose.readthedocs.io/en/dev-1.x/model_zoo/face_2d_keypoint.html).
+
+Here we use the [aflw model](https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth) as an example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \
+ configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \
+ https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \
+ --input tests/data/cofw/001766.jpg \
+ --show --draw-heatmap
+```
+
+The visualization result is as follows:
+
+If a heatmap-based model is used and `--draw-heatmap` is set, the predicted heatmaps are visualized together with the keypoints.
+
+To save the visualized results locally, use the following command:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \
+ configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \
+ https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \
+ --input tests/data/cofw/001766.jpg \
+ --draw-heatmap --output-root vis_results
+```
+
+### 2D Face Video Keypoint Estimation
+
+Videos use the same interface as images. The only difference is that during video inference, `${INPUT_PATH}` can be either the path of a local video file or the **URL** of a video file.
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth \
+ configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py \
+ https://download.openmmlab.com/mmpose/face/hrnetv2/hrnetv2_w18_aflw_256x256-f2bbc62b_20210125.pth \
+ --input demo/resources/<demo_face.mp4> \
+ --show --draw-heatmap --output-root vis_results
+```
+
+
+
+The video can be downloaded from [Google Drive](https://drive.google.com/file/d/1kQt80t6w802b_vgVcmiV_QfcSJ3RWzmb/view?usp=sharing).
+
+### 2D Face Keypoint Estimation with Inferencer
+
+The Inferencer provides a more convenient inference interface: users can skip the model config file and checkpoint path and use a model alias instead, and it supports a variety of inputs, including image paths, video paths, image folder paths, and webcams. For example:
+
+```shell
+python demo/inferencer_demo.py tests/data/wflw \
+ --pose2d face --vis-out-dir vis_results/wflw --radius 1
+```
+
+This command runs inference on all images under `tests/data/wflw` and saves the visualization results to the `vis_results/wflw` folder.
+
+In addition, the Inferencer also supports saving the predicted poses. More information can be found in the [Inferencer document](https://mmpose.readthedocs.io/en/dev-1.x/user_guides/inference.html#inferencer-a-unified-inference-interface).
+
+### Speed Up Inference
+
+For 2D face keypoint estimation models, users can speed up inference by setting `model.test_cfg.flip_test=False` in the config file, e.g. line 90 of [aflw_hrnetv2](../../../configs/face_2d_keypoint/topdown_heatmap/aflw/td-hm_hrnetv2-w18_8xb64-60e_aflw-256x256.py).
diff --git a/demo/docs/zh_cn/2d_hand_demo.md b/demo/docs/zh_cn/2d_hand_demo.md
new file mode 100644
index 0000000000..c2d80edd4e
--- /dev/null
+++ b/demo/docs/zh_cn/2d_hand_demo.md
@@ -0,0 +1,101 @@
+## 2D Hand Keypoint Demo
+
+This section shows how to use the demo scripts to estimate 2D hand keypoints on a single image or video. As before, please make sure [MMDetection](https://github.com/open-mmlab/mmdetection) version 3.0 or higher is installed in your environment.
+
+We provide a pre-trained hand bounding box detection model in the [mmdet model zoo](/demo/docs/zh_cn/mmdet_modelzoo.md#hand-bounding-box-detection-models); users can download it from there.
+
+### 2D Hand Image Keypoint Estimation
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \
+ ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --input ${INPUT_PATH} [--output-root ${OUTPUT_DIR}] \
+ [--show] [--device ${GPU_ID or CPU}] [--save-predictions] \
+ [--draw-heatmap ${DRAW_HEATMAP}] [--radius ${KPT_RADIUS}] \
+ [--kpt-thr ${KPT_SCORE_THR}] [--bbox-thr ${BBOX_SCORE_THR}]
+```
+
+Pre-trained keypoint estimation models can be found in the [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/hand_2d_keypoint.html).
+
+Here we use the [onehand10k model](https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth) as an example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \
+ configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \
+ https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \
+ --input tests/data/onehand10k/9.jpg \
+ --show --draw-heatmap
+```
+
+The visualization result is as follows:
+
+If a heatmap-based model is used and `--draw-heatmap` is set, the predicted heatmaps are visualized together with the keypoints.
+
+To save the visualized results locally, use the following command:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \
+ configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \
+ https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \
+ --input tests/data/onehand10k/9.jpg \
+ --output-root vis_results --show --draw-heatmap
+```
+
+To save the prediction results locally, add `--save-predictions`, as in the sketch below.
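+
+A minimal sketch, appending the flag to the image command above (mirroring the animal demo, the prediction file is written under `--output-root`):
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \
+ configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \
+ https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \
+ --input tests/data/onehand10k/9.jpg \
+ --output-root vis_results --save-predictions --draw-heatmap
+```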
+
+To run the demo on CPU, add `--device cpu`:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \
+ configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \
+ https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \
+ --input tests/data/onehand10k/9.jpg \
+ --show --draw-heatmap --device cpu
+```
+
+### 2D Hand Video Keypoint Estimation
+
+Videos use the same interface as images. The only difference is that during video inference, `${INPUT_PATH}` can be either the path of a local video file or the **URL** of a video file.
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py \
+ https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth \
+ configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py \
+ https://download.openmmlab.com/mmpose/hand/hrnetv2/hrnetv2_w18_onehand10k_256x256-30bc9c6b_20210330.pth \
+ --input demo/resources/<demo_hand.mp4> \
+ --output-root vis_results --show --draw-heatmap
+```
+
+
+
+The video can be downloaded [here](https://raw.githubusercontent.com/open-mmlab/mmpose/master/tests/data/nvgesture/sk_color.avi).
+
+### 2D Hand Keypoint Estimation with Inferencer
+
+The Inferencer provides a more convenient inference interface: users can skip the model config file and checkpoint path and use a model alias instead, and it supports a variety of inputs, including image paths, video paths, image folder paths, and webcams. For example:
+
+```shell
+python demo/inferencer_demo.py tests/data/onehand10k \
+ --pose2d hand --vis-out-dir vis_results/onehand10k \
+ --bbox-thr 0.5 --kpt-thr 0.05
+```
+
+This command runs inference on all images under `tests/data/onehand10k` and saves the visualization results to the `vis_results/onehand10k` folder.
+
+In addition, the Inferencer also supports saving the predicted poses. More information can be found in the [Inferencer document](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html).
+
+### Speed Up Inference
+
+For 2D hand keypoint estimation models, users can speed up inference by setting `model.test_cfg.flip_test=False` in the config file, as shown in [onehand10k_hrnetv2](../../../configs/hand_2d_keypoint/topdown_heatmap/onehand10k/td-hm_hrnetv2-w18_8xb64-210e_onehand10k-256x256.py#90).
diff --git a/demo/docs/zh_cn/2d_human_pose_demo.md b/demo/docs/zh_cn/2d_human_pose_demo.md
new file mode 100644
index 0000000000..ff6484301a
--- /dev/null
+++ b/demo/docs/zh_cn/2d_human_pose_demo.md
@@ -0,0 +1,146 @@
+## 2D Human Pose Demo
+
+This section shows how to use the demo scripts to estimate 2D human keypoints. As before, please make sure [mmdet](https://github.com/open-mmlab/mmdetection) version 3.0 or higher is installed in your environment.
+
+### 2D Human Pose Top-Down Image Demo
+
+#### Use the full image as input
+
+In this case, the whole image is used as the bounding box.
+
+```shell
+python demo/image_demo.py \
+ ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --out-file ${OUTPUT_FILE} \
+ [--device ${GPU_ID or CPU}] \
+ [--draw_heatmap]
+```
+
+If a heatmap-based model is used and `--draw-heatmap` is set, the predicted heatmaps are visualized together with the keypoints.
+
+Pre-trained keypoint estimation models can be found in the [model zoo](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo/body_2d_keypoint.html).
+
+Here we use the [coco model](https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth) as an example:
+
+```shell
+python demo/image_demo.py \
+ tests/data/coco/000000000785.jpg \
+ configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \
+ https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \
+ --out-file vis_results.jpg \
+ --draw-heatmap
+```
+
+To run inference on CPU:
+
+```shell
+python demo/image_demo.py \
+ tests/data/coco/000000000785.jpg \
+ configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w48_8xb32-210e_coco-256x192.py \
+ https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \
+ --out-file vis_results.jpg \
+ --draw-heatmap \
+ --device=cpu
+```
+
+The visualization result is as follows:
+
+#### Human bounding box detection with MMDet
+
+The command for running the demo with MMDet detection is as follows:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \
+ ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --input ${INPUT_PATH} \
+ [--output-root ${OUTPUT_DIR}] [--save-predictions] \
+ [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \
+ [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}]
+```
+
+A concrete example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \
+ https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \
+ --input tests/data/coco/000000197388.jpg --show --draw-heatmap \
+ --output-root vis_results/
+```
+
+The visualization result is as follows:
+
+To save the prediction results locally, add `--save-predictions`.
+
+### 2D Human Pose Top-Down Video Demo
+
+The script also supports video input: MMDet performs human detection first, and MMPose then performs top-down pose estimation. During video inference, `${INPUT_PATH}` can be either the path of a local video file or the **URL** of a video file.
+
+For example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py \
+ https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192-81c58e40_20220909.pth \
+ --input tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \
+ --output-root=vis_results/demo --show --draw-heatmap
+```
+
+### 2D Human Pose Bottom-Up Image and Video Demo
+
+Besides top-down, we also support bottom-up pose estimation, which does not rely on a human detector. Usage is as follows:
+
+```shell
+python demo/bottomup_demo.py \
+ ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --input ${INPUT_PATH} \
+ [--output-root ${OUTPUT_DIR}] [--save-predictions] \
+ [--show] [--device ${GPU_ID or CPU}] \
+ [--kpt-thr ${KPT_SCORE_THR}]
+```
+
+A concrete example:
+
+```shell
+python demo/bottomup_demo.py \
+ configs/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512.py \
+ https://download.openmmlab.com/mmpose/v1/body_2d_keypoint/dekr/coco/dekr_hrnet-w32_8xb10-140e_coco-512x512_ac7c17bf-20221228.pth \
+ --input tests/data/coco/000000197388.jpg --output-root=vis_results \
+ --show --save-predictions
+```
+
+The visualization result is as follows:
+
+### 2D Human Pose Estimation with Inferencer
+
+The Inferencer provides a more convenient inference interface: users can skip the model config file and checkpoint path and use a model alias instead, and it supports a variety of inputs, including image paths, video paths, image folder paths, and webcams. For example:
+
+```shell
+python demo/inferencer_demo.py \
+ tests/data/posetrack18/videos/000001_mpiinew_test/000001_mpiinew_test.mp4 \
+ --pose2d human --vis-out-dir vis_results/posetrack18
+```
+
+This command runs inference on the video under `tests/data/posetrack18` and saves the visualization results to the `vis_results/posetrack18` folder.
+
+The Inferencer supports saving the pose detection results; see the [inferencer document](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/inference.html) for usage details.
+
+### Speed Up Inference
+
+For top-down models, users can speed up inference by modifying the config file. For example:
+
+1. Set `model.test_cfg.flip_test=False`, as shown in [topdown-res50](/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_res50_8xb64-210e_coco-256x192.py#L56).
+2. Use a faster human bounding box detector; see [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) for options.
diff --git a/demo/docs/zh_cn/2d_wholebody_pose_demo.md b/demo/docs/zh_cn/2d_wholebody_pose_demo.md
new file mode 100644
index 0000000000..8c901d47fa
--- /dev/null
+++ b/demo/docs/zh_cn/2d_wholebody_pose_demo.md
@@ -0,0 +1,108 @@
+## 2D Human Whole-Body Pose Demo
+
+### 2D Human Whole-Body Pose Top-Down Image Demo
+
+#### Use the full image as input
+
+In this case, the whole image is used as the bounding box.
+
+```shell
+python demo/image_demo.py \
+ ${IMG_FILE} ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --out-file ${OUTPUT_FILE} \
+ [--device ${GPU_ID or CPU}] \
+ [--draw_heatmap]
+```
+
+Pre-trained keypoint estimation models can be found in the [model zoo](https://mmpose.readthedocs.io/zh_CN/dev-1.x/model_zoo/2d_wholebody_keypoint.html).
+
+Here we use [coco-wholebody_vipnas_res50_dark](https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth) as an example:
+
+```shell
+python demo/image_demo.py \
+ tests/data/coco/000000000785.jpg \
+ configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \
+ https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \
+ --out-file vis_results.jpg
+```
+
+To run inference on CPU:
+
+```shell
+python demo/image_demo.py \
+ tests/data/coco/000000000785.jpg \
+ configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_vipnas-res50_dark-8xb64-210e_coco-wholebody-256x192.py \
+ https://download.openmmlab.com/mmpose/top_down/vipnas/vipnas_res50_wholebody_256x192_dark-67c0ce35_20211112.pth \
+ --out-file vis_results.jpg \
+ --device=cpu
+```
+
+#### Human bounding box detection with MMDet
+
+The command format for running the demo with MMDet detection is as follows:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ ${MMDET_CONFIG_FILE} ${MMDET_CHECKPOINT_FILE} \
+ ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
+ --input ${INPUT_PATH} \
+ [--output-root ${OUTPUT_DIR}] [--save-predictions] \
+ [--show] [--draw-heatmap] [--device ${GPU_ID or CPU}] \
+ [--bbox-thr ${BBOX_SCORE_THR}] [--kpt-thr ${KPT_SCORE_THR}]
+```
+
+A concrete example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \
+ https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \
+ --input tests/data/coco/000000196141.jpg \
+ --output-root vis_results/ --show
+```
+
+To save the prediction results locally, add `--save-predictions`.
+
+### 2D Human Whole-Body Pose Top-Down Video Demo
+
+The script also supports video input: MMDet performs human detection first, and MMPose then performs top-down pose estimation.
+
+For example:
+
+```shell
+python demo/topdown_demo_with_mmdet.py \
+ demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py \
+ https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \
+ configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py \
+ https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth \
+ --input https://user-images.githubusercontent.com/87690686/137440639-fb08603d-9a35-474e-b65f-46b5c06b68d6.mp4 \
+ --output-root vis_results/ --show
+```
+
+The visualization result is as follows:
+
+### 2D Human Whole-Body Pose Estimation with Inferencer
+
+The Inferencer provides a more convenient inference interface: users can skip the model config file and checkpoint path and use a model alias instead, and it supports a variety of inputs, including image paths, video paths, image folder paths, and webcams. For example:
+
+```shell
+python demo/inferencer_demo.py tests/data/crowdpose \
+ --pose2d wholebody --vis-out-dir vis_results/crowdpose
+```
+
+This command runs inference on all images under `tests/data/crowdpose` and saves the visualization results to the `vis_results/crowdpose` folder.
+
+The Inferencer supports saving the pose detection results; see the [Inferencer document](https://mmpose.readthedocs.io/zh_CN/dev-1.x/user_guides/#inferencer-a-unified-inference-interface) for usage details.
+
+### Speed Up Inference
+
+For top-down models, users can speed up inference by modifying the config file. For example:
+
+1. Set `model.test_cfg.flip_test=False`, as shown in [pose_hrnet_w48_dark+](/configs/wholebody_2d_keypoint/topdown_heatmap/coco-wholebody/td-hm_hrnet-w48_dark-8xb32-210e_coco-wholebody-384x288.py#L90).
+2. Use a faster human bounding box detector, such as those in [MMDetection](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html).
diff --git a/demo/docs/zh_cn/mmdet_modelzoo.md b/demo/docs/zh_cn/mmdet_modelzoo.md
new file mode 100644
index 0000000000..aabfb1768d
--- /dev/null
+++ b/demo/docs/zh_cn/mmdet_modelzoo.md
@@ -0,0 +1,42 @@
+## Pre-trained Detection Models
+
+### Human Bounding Box Detection Models
+
+MMDetection provides models pre-trained on the 80 COCO categories, including `person`. Users can download them from the [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html) and use them as human bounding box detection models.
+
+### Hand Bounding Box Detection Models
+
+For hand bounding box detection, we provide a model trained on the OneHand10K dataset using MMDetection.
+
+#### Hand detection results on the OneHand10K test set
+
+| Arch | Box AP | ckpt | log |
+| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: |
+| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.817 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k-dac19597_20201030.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_onehand10k_20201030.log.json) |
+
+### Face Bounding Box Detection Models
+
+For face bounding box detection, we provide a YOLOX detector trained on the COCO-Face dataset using MMDetection.
+
+#### Face detection results on the COCO-face test set
+
+| Arch | Box AP | ckpt |
+| :-------------------------------------------------------------- | :----: | :----------------------------------------------------------------------------------------------------: |
+| [YOLOX-s](/demo/mmdetection_cfg/yolox-s_8xb8-300e_coco-face.py) | 0.408 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/yolo-x_8xb8-300e_coco-face_13274d7c.pth) |
+
+### Animal Bounding Box Detection Models
+
+#### COCO animals
+
+The COCO dataset includes 10 common `animal` categories:
+
+(14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe')
+
+Users who need detection models for these animal categories can download them from the [MMDetection Model Zoo](https://mmdetection.readthedocs.io/zh_CN/3.x/model_zoo.html).
+
+#### Detection results on the MacaquePose test set
+
+| Arch | Box AP | ckpt | log |
+| :---------------------------------------------------------------- | :----: | :---------------------------------------------------------------: | :--------------------------------------------------------------: |
+| [Faster_R-CNN_Res50-FPN-1class](/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1class.py) | 0.840 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque-f64f2812_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/faster_rcnn_r50_fpn_1x_macaque_20210409.log.json) |
+| [Cascade_R-CNN X-101-64x4d-FPN-1class](/demo/mmdetection_cfg/cascade_rcnn_x101_64x4d_fpn_1class.py) | 0.879 | [ckpt](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque-e45e36f5_20210409.pth) | [log](https://download.openmmlab.com/mmpose/mmdet_pretrained/cascade_rcnn_x101_64x4d_fpn_20e_macaque_20210409.log.json) |
diff --git a/demo/docs/zh_cn/webcam_api_demo.md b/demo/docs/zh_cn/webcam_api_demo.md
new file mode 100644
index 0000000000..acc1aa9b0a
--- /dev/null
+++ b/demo/docs/zh_cn/webcam_api_demo.md
@@ -0,0 +1,109 @@
+## Webcam Demo
+
+We provide a webcam demo tool for detection and 2D pose estimation of both humans and animals. Users can also use this script to add fun effects, such as big eyes or sunglasses, on top of the pose estimation results.
+
+### Get started
+
+Getting started is simple; run the script directly from the MMPose root directory:
+
+```shell
+# run with GPU
+python demo/webcam_api_demo.py
+
+# run with CPU only
+python demo/webcam_api_demo.py --cpu
+```
+
+This command uses the default config `demo/webcam_cfg/human_pose.py`; users can also specify a different config:
+
+```shell
+python demo/webcam_api_demo.py --config demo/webcam_cfg/human_pose.py
+```
+
+### Hotkeys
+
+| Hotkey | Function |
+| ------ | ------------------------------------- |
+| v | Toggle the pose visualization on/off. |
+| h | Show help information. |
+| m | Show the monitoring information. |
+| q | Exit. |
+
+Note: the script automatically saves the live results to a video file named `webcam_api_demo.mp4`.
+
+### Configuration
+
+Here we only give some basic descriptions; more information can be found directly in the corresponding config files.
+
+- **Set a detection model**
+
+  Users can directly use detection models from the [MMDetection Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html). Make sure that `model_config` and `model_checkpoint` in the `DetectorNode` of the config match each other; the model will then be downloaded and loaded automatically, for example:
+
+ ```python
+ # 'DetectorNode':
+ # This node performs object detection from the frame image using an
+ # MMDetection model.
+ dict(
+ type='DetectorNode',
+ name='detector',
+ model_config='demo/mmdetection_cfg/'
+ 'ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py',
+ model_checkpoint='https://download.openmmlab.com'
+ '/mmdetection/v2.0/ssd/'
+ 'ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_'
+ 'scratch_600e_coco_20210629_110627-974d9307.pth',
+ input_buffer='_input_',
+ output_buffer='det_result'),
+ ```
+
+- **Set a pose estimation model**
+
+  Here we use two [top-down](https://github.com/open-mmlab/mmpose/tree/latest/configs/body_2d_keypoint/topdown_heatmap) models for human and animal pose estimation as a demonstration. Users are free to choose models from the [MMPose Model Zoo](https://mmpose.readthedocs.io/zh_CN/latest/model_zoo/body_2d_keypoint.html). Note that after switching models, the corresponding `labels` in the pose estimator node need to be added or updated, for example:
+
+ ```python
+ # 'TopdownPoseEstimatorNode':
+ # This node performs keypoint detection from the frame image using an
+ # MMPose top-down model. Detection results is needed.
+ dict(
+ type='TopdownPoseEstimatorNode',
+ name='human pose estimator',
+ model_config='configs/wholebody_2d_keypoint/'
+ 'topdown_heatmap/coco-wholebody/'
+ 'td-hm_vipnas-mbv3_dark-8xb64-210e_coco-wholebody-256x192.py',
+ model_checkpoint='https://download.openmmlab.com/mmpose/'
+ 'top_down/vipnas/vipnas_mbv3_coco_wholebody_256x192_dark'
+ '-e2158108_20211205.pth',
+ labels=['person'],
+ input_buffer='det_result',
+ output_buffer='human_pose'),
+ dict(
+ type='TopdownPoseEstimatorNode',
+ name='animal pose estimator',
+ model_config='configs/animal_2d_keypoint/topdown_heatmap/'
+ 'animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py',
+ model_checkpoint='https://download.openmmlab.com/mmpose/animal/'
+ 'hrnet/hrnet_w32_animalpose_256x256-1aa7f075_20210426.pth',
+ labels=['cat', 'dog', 'horse', 'sheep', 'cow'],
+ input_buffer='human_pose',
+ output_buffer='animal_pose'),
+ ```
+
+- **Use a local video file**
+
+  To run the demo directly on a local video file, simply set the file path as the `camera_id` in the config.
+
+- **No webcam on the machine?**
+
+  You can install an app on your phone to act as a webcam, such as [Camo](https://reincubate.com/camo/) or [DroidCam](https://www.dev47apps.com/).
+
+- **Test the camera and display connection**
+
+  Use the following command to run the test:
+
+ ```shell
+ python demo/webcam_api_demo.py --config demo/webcam_cfg/test_camera.py
+ ```
diff --git a/docs/en/merge_docs.sh b/docs/en/merge_docs.sh
index 9dd222d3d0..23af31dd56 100644
--- a/docs/en/merge_docs.sh
+++ b/docs/en/merge_docs.sh
@@ -1,8 +1,8 @@
#!/usr/bin/env bash
# Copyright (c) OpenMMLab. All rights reserved.
-sed -i '$a\\n' ../../demo/docs/*_demo.md
-cat ../../demo/docs/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' >demos.md
+sed -i '$a\\n' ../../demo/docs/en/*_demo.md
+cat ../../demo/docs/en/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' >demos.md
# remove /docs/ for link used in doc site
sed -i 's=](/docs/en/=](=g' overview.md
@@ -18,14 +18,14 @@ sed -i 's=](/docs/en/=](=g' ./notes/*.md
sed -i 's=](/docs/en/=](=g' ./projects/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' overview.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' installation.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' quick_run.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' migration.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./advanced_guides/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./model_zoo/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./model_zoo_papers/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./user_guides/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./dataset_zoo/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./notes/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./projects/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' overview.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' installation.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' quick_run.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' migration.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./advanced_guides/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo_papers/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./user_guides/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./dataset_zoo/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./notes/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./projects/*.md
diff --git a/docs/zh_cn/merge_docs.sh b/docs/zh_cn/merge_docs.sh
index 3b9f8f0e1b..258141d5f8 100644
--- a/docs/zh_cn/merge_docs.sh
+++ b/docs/zh_cn/merge_docs.sh
@@ -1,8 +1,8 @@
#!/usr/bin/env bash
# Copyright (c) OpenMMLab. All rights reserved.
-sed -i '$a\\n' ../../demo/docs/*_demo.md
-cat ../../demo/docs/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' >demos.md
+sed -i '$a\\n' ../../demo/docs/zh_cn/*_demo.md
+cat ../../demo/docs/zh_cn/*_demo.md | sed "s/^## 2D\(.*\)Demo/##\1Estimation/" | sed "s/md###t/html#t/g" | sed '1i\# Demos\n' | sed 's=](/docs/en/=](/=g' | sed 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' >demos.md
# remove /docs/ for link used in doc site
sed -i 's=](/docs/zh_cn/=](=g' overview.md
@@ -18,14 +18,14 @@ sed -i 's=](/docs/zh_cn/=](=g' ./notes/*.md
sed -i 's=](/docs/zh_cn/=](=g' ./projects/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' overview.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' installation.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' quick_run.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' migration.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./advanced_guides/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./model_zoo/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./model_zoo_papers/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./user_guides/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./dataset_zoo/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./notes/*.md
-sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/main/=g' ./projects/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' overview.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' installation.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' quick_run.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' migration.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./advanced_guides/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./model_zoo_papers/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./user_guides/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./dataset_zoo/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./notes/*.md
+sed -i 's=](/=](https://github.com/open-mmlab/mmpose/tree/dev-1.x/=g' ./projects/*.md