From 5274023f49a8d73b795462f967e984d0184caebe Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Tue, 10 Jan 2023 17:51:54 +0800 Subject: [PATCH 1/2] [Enhance] Update rtmdet config and readme. --- configs/rtmdet/README.md | 42 +++++++++++++++++-- .../README.md | 11 +++-- .../cspnext-l_8xb256-rsb-a1-600e_in1k.py | 5 +++ .../cspnext-m_8xb256-rsb-a1-600e_in1k.py | 5 +++ .../cspnext-s_8xb256-rsb-a1-600e_in1k.py | 4 +- .../cspnext-tiny_8xb256-rsb-a1-600e_in1k.py | 0 .../cspnext-x_8xb256-rsb-a1-600e_in1k.py | 5 +++ 7 files changed, 63 insertions(+), 9 deletions(-) rename configs/rtmdet/{cspnext_imagenet_pretrain => classification}/README.md (52%) create mode 100644 configs/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py create mode 100644 configs/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py rename configs/rtmdet/{cspnext_imagenet_pretrain => classification}/cspnext-s_8xb256-rsb-a1-600e_in1k.py (93%) rename configs/rtmdet/{cspnext_imagenet_pretrain => classification}/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py (100%) create mode 100644 configs/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py diff --git a/configs/rtmdet/README.md b/configs/rtmdet/README.md index ae2df867ef6..cef0be5608d 100644 --- a/configs/rtmdet/README.md +++ b/configs/rtmdet/README.md @@ -16,7 +16,7 @@ In this paper, we aim to design an efficient real-time object detector that exce ## Results and Models -## Object Detection +### Object Detection | Model | size | box AP | Params(M) | FLOPS(G) | TRT-FP16-Latency(ms) | Config | Download | | :---------: | :--: | :----: | :-------: | :------: | :------------------: | :----------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | @@ -31,7 +31,7 @@ In this 
paper, we aim to design an efficient real-time object detector that exce 1. The inference speed of RTMDet is measured on an NVIDIA 3090 GPU with TensorRT 8.4.3, cuDNN 8.2.0, FP16, batch size=1, and without NMS. 2. For a fair comparison, the config of bbox postprocessing is changed to be consistent with YOLOv5/6/7 after [PR#9494](https://github.com/open-mmlab/mmdetection/pull/9494), bringing about 0.1~0.3% AP improvement. -## Instance Segmentation +### Instance Segmentation RTMDet-Ins is the state-of-the-art real-time instance segmentation on coco dataset: @@ -49,7 +49,7 @@ RTMDet-Ins is the state-of-the-art real-time instance segmentation on coco datas 1. The inference speed of RTMDet-Ins is measured on an NVIDIA 3090 GPU with TensorRT 8.4.3, cuDNN 8.2.0, FP16, batch size=1. Top 100 masks are kept and the post process latency is included. -## Rotated Object Detection +### Rotated Object Detection RTMDet-R achieves state-of-the-art on various remote sensing datasets. @@ -63,6 +63,30 @@ RTMDet-R achieves state-of-the-art on various remote sensing datasets. Models and configs of RTMDet-R are available in [MMRotate](https://github.com/open-mmlab/mmrotate/tree/1.x/configs/rotated_rtmdet). 
+| Backbone | pretrain | Aug | mmAP | mAP50 | mAP75 | Params(M) | FLOPS(G) | TRT-FP16-Latency(ms) | Config | Download | +| :---------: | :------: | :---: | :---: | :---: | :---: | :-------: | :------: | :------------------: | :---------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMDet-tiny | IN | RR | 47.37 | 75.36 | 50.64 | 4.88 | 20.45 | 4.40 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota/rotated_rtmdet_tiny-3x-dota-9d821076.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota/rotated_rtmdet_tiny-3x-dota_20221201_120814.json) | +| RTMDet-tiny | IN | MS+RR | 53.59 | 79.82 | 58.87 | 4.88 | 20.45 | 4.40 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota_ms/rotated_rtmdet_tiny-3x-dota_ms-f12286ff.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_tiny-3x-dota_ms/rotated_rtmdet_tiny-3x-dota_ms_20221113_201235.log) | +| RTMDet-s | IN | RR | 48.16 | 76.93 | 50.59 | 8.86 | 37.62 | 4.86 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_s-3x-dota.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota/rotated_rtmdet_s-3x-dota-11f6ccf5.pth) \| 
[log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota/rotated_rtmdet_s-3x-dota_20221124_081442.json) | +| RTMDet-s | IN | MS+RR | 54.43 | 79.98 | 60.07 | 8.86 | 37.62 | 4.86 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_s-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota_ms/rotated_rtmdet_s-3x-dota_ms-20ead048.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_s-3x-dota_ms/rotated_rtmdet_s-3x-dota_ms_20221113_201055.json) | +| RTMDet-m | IN | RR | 50.56 | 78.24 | 54.47 | 24.67 | 99.76 | 7.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_m-3x-dota.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota/rotated_rtmdet_m-3x-dota-beeadda6.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota/rotated_rtmdet_m-3x-dota_20221122_011234.json) | +| RTMDet-m | IN | MS+RR | 55.00 | 80.26 | 61.26 | 24.67 | 99.76 | 7.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_m-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota_ms/rotated_rtmdet_m-3x-dota_ms-c71eb375.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_m-3x-dota_ms/rotated_rtmdet_m-3x-dota_ms_20221122_011234.json) | +| RTMDet-l | IN | RR | 51.01 | 78.85 | 55.21 | 52.27 | 204.21 | 10.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_l-3x-dota.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota/rotated_rtmdet_l-3x-dota-23992372.pth) \| 
[log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota/rotated_rtmdet_l-3x-dota_20221122_011241.json) | +| RTMDet-l | IN | MS+RR | 55.52 | 80.54 | 61.47 | 52.27 | 204.21 | 10.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_l-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota_ms/rotated_rtmdet_l-3x-dota_ms-2738da34.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-3x-dota_ms/rotated_rtmdet_l-3x-dota_ms_20221122_011241.json) | +| RTMDet-l | COCO | MS+RR | 56.74 | 81.33 | 63.45 | 52.27 | 204.21 | 10.82 | [config](https://github.com/open-mmlab/mmrotate/edit/1.x/configs/rotated_rtmdet/rotated_rtmdet_l-coco_pretrain-3x-dota_ms.py) | [model](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-coco_pretrain-3x-dota_ms/rotated_rtmdet_l-coco_pretrain-3x-dota_ms-06d248a2.pth) \| [log](https://download.openmmlab.com/mmrotate/v1.0/rotated_rtmdet/rotated_rtmdet_l-coco_pretrain-3x-dota_ms/rotated_rtmdet_l-coco_pretrain-3x-dota_ms_20221113_202010.json) | + +### Classification + +We also provide the imagenet classification configs of the RTMDet backbone. Find more details in the [classification folder](./classification). 
+ +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +| :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | +| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | +| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | +| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | +| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | + ## Citation ```latex @@ -81,3 +105,15 @@ Models and configs of RTMDet-R are available in [MMRotate](https://github.com/op
+ +## Deployment Tutorial + +### Step1. Install MMDeploy + +### Step2. Export Model + +#### ONNX + +#### TensorRT + +### Step3. Inference with SDK diff --git a/configs/rtmdet/cspnext_imagenet_pretrain/README.md b/configs/rtmdet/classification/README.md similarity index 52% rename from configs/rtmdet/cspnext_imagenet_pretrain/README.md rename to configs/rtmdet/classification/README.md index 2db5a50ec5e..dbfef4c7249 100644 --- a/configs/rtmdet/cspnext_imagenet_pretrain/README.md +++ b/configs/rtmdet/classification/README.md @@ -47,7 +47,10 @@ More details can be found in [user guides](https://mmdetection.readthedocs.io/en ## Results and Models -| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | -| :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------------------------------------------------: | -| CSPNeXt-tiny | 224x224 | 2.73 | 0.339 | 69.44 | 89.45 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth) | -| CSPNeXt-s | 224x224 | 4.89 | 0.664 | 74.41 | 92.23 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth) | +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +| :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :---------------------------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 224x224 | 2.73 | 0.34 | 69.44 | 89.45 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e-3a2dd350.pth) | +| CSPNeXt-s | 224x224 | 4.89 | 0.66 | 74.41 | 92.23 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e-ea671761.pth) | +| CSPNeXt-m | 224x224 | 13.05 | 1.93 | 79.27 | 94.79 | 
[model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth) | +| CSPNeXt-l | 224x224 | 27.16 | 4.19 | 81.30 | 95.62 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth) | +| CSPNeXt-x | 224x224 | 48.85 | 7.76 | 82.10 | 95.69 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-x_8xb256-rsb-a1-600e_in1k-b3f78edd.pth) | diff --git a/configs/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py b/configs/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 00000000000..d2e70539f05 --- /dev/null +++ b/configs/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=1, widen_factor=1), + head=dict(in_channels=1024)) diff --git a/configs/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py b/configs/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 00000000000..e1b1352dd91 --- /dev/null +++ b/configs/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=0.67, widen_factor=0.75), + head=dict(in_channels=768)) diff --git a/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-s_8xb256-rsb-a1-600e_in1k.py b/configs/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py similarity index 93% rename from configs/rtmdet/cspnext_imagenet_pretrain/cspnext-s_8xb256-rsb-a1-600e_in1k.py rename to configs/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py index 2c88ee18b02..08293efcd9c 100644 --- a/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-s_8xb256-rsb-a1-600e_in1k.py +++ b/configs/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py @@ -30,8 +30,8 @@ 
loss_weight=1.0), topk=(1, 5)), train_cfg=dict(augments=[ - dict(type='Mixup', alpha=0.2, num_classes=1000), - dict(type='CutMix', alpha=1.0, num_classes=1000) + dict(type='Mixup', alpha=0.2), + dict(type='CutMix', alpha=1.0) ])) # dataset settings diff --git a/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py b/configs/rtmdet/classification/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py similarity index 100% rename from configs/rtmdet/cspnext_imagenet_pretrain/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py rename to configs/rtmdet/classification/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py diff --git a/configs/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py b/configs/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 00000000000..edec48d78db --- /dev/null +++ b/configs/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=1.33, widen_factor=1.25), + head=dict(in_channels=1280)) From 63031157601bc3e9b075b9aca7b2077e4297d95a Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Thu, 12 Jan 2023 14:50:30 +0800 Subject: [PATCH 2/2] update readme --- configs/rtmdet/README.md | 209 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 206 insertions(+), 3 deletions(-) diff --git a/configs/rtmdet/README.md b/configs/rtmdet/README.md index cef0be5608d..08e6860b70f 100644 --- a/configs/rtmdet/README.md +++ b/configs/rtmdet/README.md @@ -108,12 +108,215 @@ We also provide the imagenet classification configs of the RTMDet backbone. Find ## Deployment Tutorial +Here is a basic example of deploy RTMDet with [MMDeploy-1.x](https://github.com/open-mmlab/mmdeploy/tree/1.x). + ### Step1. Install MMDeploy -### Step2. Export Model +Before starting the deployment, please make sure you install MMDetection-3.x and MMDeploy-1.x correctly. 
+ +- Install MMDetection-3.x, please refer to the [MMDetection-3.x installation guide](https://mmdetection.readthedocs.io/en/3.x/get_started.html). +- Install MMDeploy-1.x, please refer to the [MMDeploy-1.x installation guide](https://mmdeploy.readthedocs.io/en/1.x/get_started.html#installation). + +If you want to deploy RTMDet with ONNXRuntime, TensorRT, or other inference engines, +please make sure you have installed the corresponding dependencies and MMDeploy precompiled packages. + +### Step2. Convert Model + +After the installation, you can enjoy the model deployment journey starting from converting PyTorch model to backend model by running MMDeploy's `tools/deploy.py`. + +For the detailed model conversion tutorial, please refer to the [MMDeploy document](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/convert_model.html). +Here we only give the example of converting RTMDet. + +MMDeploy supports converting dynamic and static models. Dynamic models support different input shapes, but the inference speed is slower than static models. +To achieve the best performance, we suggest converting RTMDet with the static setting. + +- If you only want to use ONNX, please use [`configs/mmdet/detection/detection_onnxruntime_static.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_onnxruntime_static.py) as the deployment config. +- If you want to use TensorRT, please use [`configs/mmdet/detection/detection_tensorrt_static-640x640.py`](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_tensorrt_static-640x640.py). + +If you want to customize the settings in the deployment config for your requirements, please refer to [MMDeploy config tutorial](https://mmdeploy.readthedocs.io/en/1.x/02-how-to-run/write_config.html). + +After preparing the deployment config, you can run the `tools/deploy.py` script to convert your model.
+Here we take converting RTMDet-s to TensorRT as an example: + +```shell +# go to the mmdeploy folder +cd ${PATH_TO_MMDEPLOY} + +# download RTMDet-s checkpoint +wget -P checkpoint https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_s_8xb32-300e_coco/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth + +# run the command to start model conversion +python tools/deploy.py \ + configs/mmdet/detection/detection_tensorrt_static-640x640.py \ + ${PATH_TO_MMDET}/configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \ + checkpoint/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth \ + demo/resources/det.jpg \ + --work-dir ./work_dirs/rtmdet \ + --device cuda:0 \ + --show +``` + +If the script runs successfully, you will see the following files: -#### ONNX +``` +|----work_dirs + |----rtmdet + |----end2end.onnx # ONNX model + |----end2end.engine # TensorRT engine file +``` -#### TensorRT +After this, you can check the inference results with MMDeploy Model Converter API: + +```python +from mmdeploy.apis import inference_model + +result = inference_model( + model_cfg='${PATH_TO_MMDET}/configs/rtmdet/rtmdet_s_8xb32-300e_coco.py', + deploy_cfg='${PATH_TO_MMDEPLOY}/configs/mmdet/detection/detection_tensorrt_static-640x640.py', + backend_files=['work_dirs/rtmdet/end2end.engine'], + img='demo/resources/det.jpg', + device='cuda:0') +``` + +#### Advanced Setting + +To convert the model with TRT-FP16, you can enable the fp16 mode in your deploy config: + +```python +# in MMDeploy config +backend_config = dict( + type='tensorrt', + common_config=dict( + fp16_mode=True # enable fp16 + )) +``` + +To reduce the end-to-end inference latency with the inference engine, we suggest you adjust the post-processing settings of the model. +We set a very low score threshold during training and testing to achieve better COCO mAP. +However, in actual usage scenarios, a relatively high score threshold (e.g. 0.3) is usually used.
+ +You can adjust the score threshold and the number of detection boxes in your model config according to the actual usage to reduce the post-processing time. + +```python +# in MMDetection config +model = dict( + test_cfg=dict( + nms_pre=1000, # keep top-k score bboxes before nms + min_bbox_size=0, + score_thr=0.3, # score threshold to filter bboxes + nms=dict(type='nms', iou_threshold=0.65), + max_per_img=100) # only keep top-100 as the final results. +) +``` ### Step3. Inference with SDK + +We provide both Python and C++ inference APIs with MMDeploy SDK. + +To use the SDK, you need to dump the required info during converting the model. Just add `--dump-info` to the model conversion command: + +```shell +python tools/deploy.py \ + configs/mmdet/detection/detection_tensorrt_static-640x640.py \ + ${PATH_TO_MMDET}/configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \ + checkpoint/rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth \ + demo/resources/det.jpg \ + --work-dir ./work_dirs/rtmdet-sdk \ + --device cuda:0 \ + --show \ + --dump-info # dump sdk info +``` + +After running the command, it will dump 3 json files additionally for the SDK: + +``` +|----work_dirs + |----rtmdet-sdk + |----end2end.onnx # ONNX model + |----end2end.engine # TensorRT engine file + # json files for the SDK + |----pipeline.json + |----deploy.json + |----detail.json +``` + +#### Python API + +Here is a basic example of SDK Python API: + +```python +from mmdeploy_python import Detector +import cv2 + +img = cv2.imread('demo/resources/det.jpg') + +# create a detector +detector = Detector(model_path='work_dirs/rtmdet-sdk', device_name='cuda', device_id=0) +# run the inference +bboxes, labels, _ = detector(img) +# Filter the result according to threshold +indices = [i for i in range(len(bboxes))] +for index, bbox, label_id in zip(indices, bboxes, labels): + [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4] + if score < 0.3: + continue + # draw bbox + cv2.rectangle(img, (left, 
top), (right, bottom), (0, 255, 0)) + +cv2.imwrite('output_detection.png', img) +``` + +#### C++ API + +Here is a basic example of SDK C++ API: + +```C++ +#include <cstdio> +#include <opencv2/opencv.hpp> +#include "mmdeploy/detector.hpp" + +int main() { + const char* device_name = "cuda"; + int device_id = 0; + std::string model_path = "work_dirs/rtmdet-sdk"; + std::string image_path = "demo/resources/det.jpg"; + + // 1. load model + mmdeploy::Model model(model_path); + // 2. create predictor + mmdeploy::Detector detector(model, mmdeploy::Device{device_name, device_id}); + // 3. read image + cv::Mat img = cv::imread(image_path); + // 4. inference + auto dets = detector.Apply(img); + // 5. deal with the result. Here we choose to visualize it + for (int i = 0; i < dets.size(); ++i) { + const auto& box = dets[i].bbox; + fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n", + i, box.left, box.top, box.right, box.bottom, dets[i].label_id, dets[i].score); + if (dets[i].score < 0.3) { + continue; + } + cv::rectangle(img, cv::Point{(int)box.left, (int)box.top}, + cv::Point{(int)box.right, (int)box.bottom}, cv::Scalar{0, 255, 0}); + } + cv::imwrite("output_detection.png", img); + return 0; +} +``` + +To build the C++ example, please add the MMDeploy package to your CMake project as follows: + +```cmake +find_package(MMDeploy REQUIRED) +target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) +``` + +#### Other languages + +- [C# API Examples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) + +- [JAVA API Examples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java) + +### Deploy RTMDet Instance Segmentation Model + +Coming soon!