From 192af2162b1d19511dbbf3a61cc9c83fa13fb647 Mon Sep 17 00:00:00 2001
From: zwhus <1062894314zwh@gmail.com>
Date: Fri, 17 Feb 2023 14:23:20 +0800
Subject: [PATCH 01/13] efficientdet
---
projects/EfficientDet/REMEAD.md | 130 ----
projects/EfficientDet/configs/d0.py | 164 +++++
...16xb8-crop512-300e_coco.py => d0_90_xy.py} | 8 +-
projects/EfficientDet/configs/d0_huber.py | 167 +++++
.../EfficientDet/configs/d0_huber_clip.py | 176 +++++
projects/EfficientDet/configs/d0_xy.py | 168 +++++
projects/EfficientDet/configs/d3.py | 175 +++++
...k2former_r50_8xb2-lsj-50e_coco-panoptic.py | 241 +++++++
projects/EfficientDet/configs/mask_fpn.py | 240 +++++++
projects/EfficientDet/configs/retinanet.py | 119 ++++
projects/EfficientDet/convert_tf_to_pt.py | 627 ------------------
.../EfficientDet/efficientdet/__init__.py | 6 +-
.../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 868 bytes
.../anchor_generator.cpython-37.pyc | Bin 0 -> 2831 bytes
.../__pycache__/bifpn.cpython-37.pyc | Bin 0 -> 6544 bytes
.../__pycache__/bifpn_no_bn.cpython-37.pyc | Bin 0 -> 9256 bytes
.../__pycache__/coco_90class.cpython-37.pyc | Bin 0 -> 6437 bytes
.../__pycache__/coco_90metric.cpython-37.pyc | Bin 0 -> 15187 bytes
.../__pycache__/efficientdet.cpython-37.pyc | Bin 0 -> 986 bytes
.../efficientdet_head.cpython-37.pyc | Bin 0 -> 7803 bytes
.../efficientdet_head_huber.cpython-37.pyc | Bin 0 -> 9488 bytes
.../__pycache__/final.cpython-37.pyc | Bin 0 -> 6880 bytes
.../__pycache__/final_mmcv.cpython-37.pyc | Bin 0 -> 6894 bytes
.../__pycache__/final_syncbn.cpython-37.pyc | Bin 0 -> 6676 bytes
.../__pycache__/huber_loss.cpython-37.pyc | Bin 0 -> 2963 bytes
.../__pycache__/kaming_bifpn.cpython-37.pyc | Bin 0 -> 9925 bytes
.../__pycache__/mask2former.cpython-37.pyc | Bin 0 -> 15062 bytes
.../__pycache__/new_bifpn.cpython-37.pyc | Bin 0 -> 9894 bytes
.../__pycache__/ori_bifpn.cpython-37.pyc | Bin 0 -> 9635 bytes
.../ori_bifpn_nodepth.cpython-37.pyc | Bin 0 -> 9921 bytes
.../__pycache__/ori_noinit.cpython-37.pyc | Bin 0 -> 10121 bytes
.../__pycache__/ori_sync.cpython-37.pyc | Bin 0 -> 10372 bytes
.../retina_sepbn_head.cpython-37.pyc | Bin 0 -> 4023 bytes
.../trans_max_iou_assigner.cpython-37.pyc | Bin 0 -> 4223 bytes
.../__pycache__/utils.cpython-37.pyc | Bin 0 -> 4822 bytes
.../__pycache__/utils_mmcv.cpython-37.pyc | Bin 0 -> 4114 bytes
.../__pycache__/utils_syncbn.cpython-37.pyc | Bin 0 -> 5211 bytes
.../yxyx_bbox_coder.cpython-37.pyc | Bin 0 -> 11605 bytes
.../efficientdet/anchor_generator.py | 13 -
.../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 299 bytes
.../__pycache__/coco_api.cpython-37.pyc | Bin 0 -> 4839 bytes
.../efficientdet/api_wrappers/coco_api.py | 1 -
projects/EfficientDet/efficientdet/bifpn.py | 53 +-
.../efficientdet/efficientdet_head.py | 98 ++-
.../efficientdet/efficientdet_head_huber.py | 261 ++++++++
.../EfficientDet/efficientdet/huber_loss.py | 91 +++
projects/EfficientDet/efficientdet/utils.py | 81 +--
.../efficientdet/yxyx_bbox_coder.py | 19 -
48 files changed, 1955 insertions(+), 883 deletions(-)
delete mode 100644 projects/EfficientDet/REMEAD.md
create mode 100644 projects/EfficientDet/configs/d0.py
rename projects/EfficientDet/configs/{efficientdet_effb0_bifpn_16xb8-crop512-300e_coco.py => d0_90_xy.py} (96%)
create mode 100644 projects/EfficientDet/configs/d0_huber.py
create mode 100644 projects/EfficientDet/configs/d0_huber_clip.py
create mode 100644 projects/EfficientDet/configs/d0_xy.py
create mode 100644 projects/EfficientDet/configs/d3.py
create mode 100644 projects/EfficientDet/configs/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
create mode 100644 projects/EfficientDet/configs/mask_fpn.py
create mode 100644 projects/EfficientDet/configs/retinanet.py
delete mode 100644 projects/EfficientDet/convert_tf_to_pt.py
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/__init__.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/anchor_generator.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/bifpn.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/bifpn_no_bn.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/coco_90class.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/coco_90metric.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/efficientdet.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/efficientdet_head.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/efficientdet_head_huber.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/final.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/final_mmcv.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/final_syncbn.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/huber_loss.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/kaming_bifpn.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/mask2former.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/new_bifpn.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/ori_bifpn.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/ori_bifpn_nodepth.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/ori_noinit.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/ori_sync.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/retina_sepbn_head.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/trans_max_iou_assigner.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/utils.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/utils_mmcv.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/utils_syncbn.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/__pycache__/yxyx_bbox_coder.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/api_wrappers/__pycache__/__init__.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/api_wrappers/__pycache__/coco_api.cpython-37.pyc
create mode 100644 projects/EfficientDet/efficientdet/efficientdet_head_huber.py
create mode 100644 projects/EfficientDet/efficientdet/huber_loss.py
diff --git a/projects/EfficientDet/REMEAD.md b/projects/EfficientDet/REMEAD.md
deleted file mode 100644
index 7bc073f0df5..00000000000
--- a/projects/EfficientDet/REMEAD.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# EfficientDet
-
-> [**EfficientDet: Scalable and Efficient Object Detection**](https://arxiv.org/pdf/1911.09070.pdf),
-> Mingxing Tan, Ruoming Pang, Quoc V. Le,
-> *CVPR 2020*
-
-## Abstract
-
-This is an implementation of [EfficientDet](https://github.com/google/automl) based on [MMDetection](https://github.com/open-mmlab/mmdetection/tree/3.x), [MMCV](https://github.com/open-mmlab/mmcv), and [MMEngine](https://github.com/open-mmlab/mmengine).
-
-EfficientDet a new family of object detectors, which consistently achieve much better efficiency than prior art across a wide
-spectrum of resource constraints.
-In particular, with single model and single-scale, EfficientDet-D7 achieves stateof-the-art 55.1 AP on COCO test-dev with 77M parameters and 410B FLOP.
-
-BiFPN is a simple yet highly effective weighted bi-directional feature pyramid network, which introduces learnable weights to learn the importance of different input features, while repeatedly applying topdown and bottom-up multi-scale feature fusion.
-
-In contrast to other feature pyramid network, such as FPN, FPN + PAN, NAS-FPN, BiFPN achieves the best accuracy with fewer parameters and FLOPs.
-
-
-

-
-
-## Usage
-
-### Model conversion
-
-Firstly, download EfficientDet [weights](https://github.com/google/automl/tree/master/efficientdet) and unzip, please use the following command
-
-```bash
-tar -xzvf {EFFICIENTDET_WEIGHT}
-```
-
-Then, install tensorflow, please use the following command
-
-```bash
-pip install tensorflow-gpu==2.6.0
-```
-
-Lastly, convert weights from tensorflow to pytorch, please use the following command
-
-```bash
-python projects/EfficientDet/convert_tf_to_pt.py --backbone {BACKBONE_NAME} --tensorflow_weight {TENSORFLOW_WEIGHT_PATH} --out_weight {OUT_PATH}
-```
-
-### Testing commands
-
-In MMDetection's root directory, run the following command to test the model:
-
-```bash
-python tools/test.py projects/EfficientDet/configs/efficientdet_effb0_bifpn_8xb16-crop512-300e_coco.py ${CHECKPOINT_PATH}
-```
-
-## Results
-
-Based on mmdetection, this project aligns the test accuracy of the [official model](https://github.com/google/automl).
-
-If you want to reproduce the test results, you need to convert model weights first, then run the test command.
-
-The training accuracy will also be aligned with the official in the future
-
-| Method | Backbone | Pretrained Model | Training set | Test set | Epoch | Val Box AP | Official AP |
-| :------------------------------------------------------------------------------: | :-------------: | :--------------: | :------------: | :----------: | :---: | :--------: | :---------: |
-| [efficientdet-d0](./configs/efficientdet_effb0_bifpn_8xb16-crop512-300e_coco.py) | efficientnet-b0 | ImageNet | COCO2017 Train | COCO2017 Val | 300 | 34.4 | 34.3 |
-
-## Citation
-
-```BibTeX
-@inproceedings{tan2020efficientdet,
- title={Efficientdet: Scalable and efficient object detection},
- author={Tan, Mingxing and Pang, Ruoming and Le, Quoc V},
- booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
- pages={10781--10790},
- year={2020}
-}
-```
-
-## Checklist
-
-
-
-- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.
-
- - [x] Finish the code
-
-
-
- - [x] Basic docstrings & proper citation
-
-
-
- - [x] Test-time correctness
-
-
-
- - [x] A full README
-
-
-
-- [ ] Milestone 2: Indicates a successful model implementation.
-
- - [ ] Training-time correctness
-
-
-
-- [ ] Milestone 3: Good to be a part of our core package!
-
- - [ ] Type hints and docstrings
-
-
-
- - [ ] Unit tests
-
-
-
- - [ ] Code polishing
-
-
-
- - [ ] Metafile.yml
-
-
-
-- [ ] Move your modules into the core package following the codebase's file hierarchy structure.
-
-
-
-- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure.
diff --git a/projects/EfficientDet/configs/d0.py b/projects/EfficientDet/configs/d0.py
new file mode 100644
index 00000000000..d749ab9e885
--- /dev/null
+++ b/projects/EfficientDet/configs/d0.py
@@ -0,0 +1,164 @@
+_base_ = [
+ 'mmdet::_base_/datasets/coco_detection.py',
+ 'mmdet::_base_/schedules/schedule_1x.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+ imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = 512
+datasettype = 'Coco90Dataset'
+evalute_type = 'Coco90Metric'
+batch_augments = [
+ dict(type='BatchFixedSizePad', size=(image_size, image_size))
+]
+norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth' # noqa
+model = dict(
+ type='EfficientDet',
+ data_preprocessor=dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=image_size,
+ batch_augments=batch_augments),
+ backbone=dict(
+ type='EfficientNet',
+ arch='b0',
+ drop_path_rate=0.2,
+ out_indices=(3, 4, 5),
+ frozen_stages=0,
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ init_cfg=dict(
+ type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
+ neck=dict(
+ type='BiFPN',
+ num_stages=3,
+ in_channels=[40, 112, 320],
+ out_channels=64,
+ start_level=0,
+ norm_cfg=norm_cfg),
+ bbox_head=dict(
+ type='EfficientDetSepBNHead',
+ num_classes=90,
+ num_ins=5,
+ in_channels=64,
+ feat_channels=64,
+ stacked_convs=3,
+ norm_cfg=norm_cfg,
+ anchor_generator=dict(
+ type='YXYXAnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[1.0, 0.5, 2.0],
+ strides=[8, 16, 32, 64, 128],
+ center_offset=0.5),
+ bbox_coder=dict(
+ type='YXYXDeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
+ # training and testing settings
+ train_cfg=dict(
+ assigner=dict(
+ type='TransMaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='PseudoSampler'), # Focal loss should use PseudoSampler
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ test_cfg=dict(
+ nms_pre=1000,
+ min_bbox_size=0,
+ score_thr=0.05,
+ nms=dict(
+ type='soft_nms',
+ iou_threshold=0.3,
+ sigma=0.5,
+ min_score=1e-3,
+ method='gaussian'),
+ max_per_img=100))
+
+# dataset settings
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='RandomResize',
+ scale=(image_size, image_size),
+ ratio_range=(0.1, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=(image_size, image_size)),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PackDetInputs')
+]
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='PackDetInputs',
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+ 'scale_factor'))
+]
+
+train_dataloader = dict(
+ batch_size=16,
+ num_workers=16,
+ dataset=dict(type=datasettype, pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(type=datasettype, pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(type='Coco90Metric')
+test_evaluator = val_evaluator
+
+optim_wrapper = dict(
+ optimizer=dict(lr=0.16),
+ paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
+
+# learning policy
+max_epochs = 300
+param_scheduler = [
+ dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
+ dict(
+ type='CosineAnnealingLR',
+ eta_min=0.0016,
+ begin=1,
+ T_max=284,
+ end=285,
+ by_epoch=True,
+ convert_to_iter_based=True)
+]
+train_cfg = dict(max_epochs=max_epochs, val_interval=1)
+
+vis_backends = [
+ dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend')
+]
+visualizer = dict(
+ type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+
+default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
+# cudnn_benchmark=True can accelerate fix-size training
+env_cfg = dict(cudnn_benchmark=True)
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR,
+# USER SHOULD NOT CHANGE ITS VALUES.
+# base_batch_size = (8 GPUs) x (32 samples per GPU)
+auto_scale_lr = dict(base_batch_size=128)
diff --git a/projects/EfficientDet/configs/efficientdet_effb0_bifpn_16xb8-crop512-300e_coco.py b/projects/EfficientDet/configs/d0_90_xy.py
similarity index 96%
rename from projects/EfficientDet/configs/efficientdet_effb0_bifpn_16xb8-crop512-300e_coco.py
rename to projects/EfficientDet/configs/d0_90_xy.py
index 080b7963b95..1eb198a6a66 100644
--- a/projects/EfficientDet/configs/efficientdet_effb0_bifpn_16xb8-crop512-300e_coco.py
+++ b/projects/EfficientDet/configs/d0_90_xy.py
@@ -49,14 +49,14 @@
stacked_convs=3,
norm_cfg=norm_cfg,
anchor_generator=dict(
- type='YXYXAnchorGenerator',
+ type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[1.0, 0.5, 2.0],
strides=[8, 16, 32, 64, 128],
center_offset=0.5),
bbox_coder=dict(
- type='YXYXDeltaXYWHBBoxCoder',
+ type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
@@ -69,7 +69,7 @@
# training and testing settings
train_cfg=dict(
assigner=dict(
- type='TransMaxIoUAssigner',
+ type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0,
@@ -125,7 +125,7 @@
val_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))
test_dataloader = val_dataloader
-val_evaluator = dict(type=evalute_type)
+val_evaluator = dict(type='Coco90Metric')
test_evaluator = val_evaluator
optim_wrapper = dict(
diff --git a/projects/EfficientDet/configs/d0_huber.py b/projects/EfficientDet/configs/d0_huber.py
new file mode 100644
index 00000000000..84bf280b437
--- /dev/null
+++ b/projects/EfficientDet/configs/d0_huber.py
@@ -0,0 +1,167 @@
+_base_ = [
+ 'mmdet::_base_/datasets/coco_detection.py',
+ 'mmdet::_base_/schedules/schedule_1x.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+ imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = 512
+batch_augments = [
+ dict(type='BatchFixedSizePad', size=(image_size, image_size))
+]
+norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth' # noqa
+model = dict(
+ type='EfficientDet',
+ data_preprocessor=dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=image_size,
+ batch_augments=batch_augments),
+ backbone=dict(
+ type='EfficientNet',
+ arch='b0',
+ drop_path_rate=0.2,
+ out_indices=(3, 4, 5),
+ frozen_stages=0,
+ conv_cfg=dict(type='Conv2dSamePadding'),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ init_cfg=dict(
+ type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
+ neck=dict(
+ type='BiFPN',
+ num_stages=3,
+ in_channels=[40, 112, 320],
+ out_channels=64,
+ start_level=0,
+ norm_cfg=norm_cfg),
+ bbox_head=dict(
+ type='EfficientDetSepBNHead_Huber',
+ num_classes=80,
+ num_ins=5,
+ in_channels=64,
+ feat_channels=64,
+ stacked_convs=3,
+ norm_cfg=norm_cfg,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[1.0, 0.5, 2.0],
+ strides=[8, 16, 32, 64, 128],
+ center_offset=0.5),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=1.5,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50)),
+ # training and testing settings
+ train_cfg=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='PseudoSampler'), # Focal loss should use PseudoSampler
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ test_cfg=dict(
+ nms_pre=1000,
+ min_bbox_size=0,
+ score_thr=0.05,
+ nms=dict(
+ type='soft_nms',
+ iou_threshold=0.3,
+ sigma=0.5,
+ min_score=1e-3,
+ method='gaussian'),
+ max_per_img=100))
+
+# dataset settings
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='RandomResize',
+ scale=(image_size, image_size),
+ ratio_range=(0.1, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=(image_size, image_size)),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PackDetInputs')
+]
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='PackDetInputs',
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+ 'scale_factor'))
+]
+
+train_dataloader = dict(
+ batch_size=16, num_workers=16, dataset=dict(pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+optim_wrapper = dict(
+ optimizer=dict(lr=0.16, weight_decay=4e-5),
+ paramwise_cfg=dict(
+ norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
+
+# learning policy
+max_epochs = 300
+param_scheduler = [
+ dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
+ dict(
+ type='CosineAnnealingLR',
+ eta_min=0.0,
+ begin=1,
+ T_max=299,
+ end=300,
+ by_epoch=True,
+ convert_to_iter_based=True)
+]
+train_cfg = dict(max_epochs=max_epochs, val_interval=1)
+
+vis_backends = [
+ dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend')
+]
+visualizer = dict(
+ type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+
+default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
+custom_hooks = [
+ dict(
+ type='EMAHook',
+ ema_type='ExpMomentumEMA',
+ momentum=0.0002,
+ update_buffers=True,
+ priority=49)
+]
+# cudnn_benchmark=True can accelerate fix-size training
+env_cfg = dict(cudnn_benchmark=True)
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR,
+# USER SHOULD NOT CHANGE ITS VALUES.
+# base_batch_size = (8 GPUs) x (32 samples per GPU)
+auto_scale_lr = dict(base_batch_size=128)
diff --git a/projects/EfficientDet/configs/d0_huber_clip.py b/projects/EfficientDet/configs/d0_huber_clip.py
new file mode 100644
index 00000000000..688332a1b28
--- /dev/null
+++ b/projects/EfficientDet/configs/d0_huber_clip.py
@@ -0,0 +1,176 @@
+_base_ = [
+ 'mmdet::_base_/datasets/coco_detection.py',
+ 'mmdet::_base_/schedules/schedule_1x.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+ imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = 512
+batch_augments = [
+ dict(type='BatchFixedSizePad', size=(image_size, image_size))
+]
+dataset_type = 'Coco90Dataset'
+evalute_type = 'Coco90Metric'
+norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth' # noqa
+model = dict(
+ type='EfficientDet',
+ data_preprocessor=dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=image_size,
+ batch_augments=batch_augments),
+ backbone=dict(
+ type='EfficientNet',
+ arch='b0',
+ drop_path_rate=0.2,
+ out_indices=(3, 4, 5),
+ frozen_stages=0,
+ conv_cfg=dict(type='Conv2dSamePadding'),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ init_cfg=dict(
+ type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
+ neck=dict(
+ type='BiFPN',
+ num_stages=3,
+ in_channels=[40, 112, 320],
+ out_channels=64,
+ start_level=0,
+ norm_cfg=norm_cfg,
+ use_meswish=False),
+ bbox_head=dict(
+ type='EfficientDetSepBNHead_Huber',
+ num_classes=90,
+ num_ins=5,
+ in_channels=64,
+ feat_channels=64,
+ stacked_convs=3,
+ norm_cfg=norm_cfg,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[1.0, 0.5, 2.0],
+ strides=[8, 16, 32, 64, 128],
+ center_offset=0.5),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=1.5,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50)),
+ # training and testing settings
+ train_cfg=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='PseudoSampler'), # Focal loss should use PseudoSampler
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ test_cfg=dict(
+ nms_pre=1000,
+ min_bbox_size=0,
+ score_thr=0.05,
+ nms=dict(
+ type='soft_nms',
+ iou_threshold=0.3,
+ sigma=0.5,
+ min_score=1e-3,
+ method='gaussian'),
+ max_per_img=100))
+
+# dataset settings
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='RandomResize',
+ scale=(image_size, image_size),
+ ratio_range=(0.1, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=(image_size, image_size)),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PackDetInputs')
+]
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='PackDetInputs',
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+ 'scale_factor'))
+]
+
+train_dataloader = dict(
+ batch_size=16,
+ num_workers=16,
+ dataset=dict(type=dataset_type, pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(type='Coco90Metric')
+test_evaluator = val_evaluator
+
+optim_wrapper = dict(
+ optimizer=dict(lr=0.16, weight_decay=4e-5),
+ paramwise_cfg=dict(
+ norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
+ clip_grad=dict(max_norm=10, norm_type=2))
+
+# learning policy
+max_epochs = 300
+param_scheduler = [
+ dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
+ dict(
+ type='CosineAnnealingLR',
+ eta_min=0.0,
+ begin=1,
+ T_max=299,
+ end=300,
+ by_epoch=True,
+ convert_to_iter_based=True)
+]
+train_cfg = dict(max_epochs=max_epochs, val_interval=1)
+
+vis_backends = [
+ dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend')
+]
+visualizer = dict(
+ type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+
+default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
+custom_hooks = [
+ dict(
+ type='EMAHook',
+ ema_type='ExpMomentumEMA',
+ momentum=0.0002,
+ update_buffers=True,
+ priority=49)
+]
+# cudnn_benchmark=True can accelerate fix-size training
+env_cfg = dict(cudnn_benchmark=True)
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR,
+# USER SHOULD NOT CHANGE ITS VALUES.
+# base_batch_size = (8 GPUs) x (32 samples per GPU)
+auto_scale_lr = dict(base_batch_size=128)
diff --git a/projects/EfficientDet/configs/d0_xy.py b/projects/EfficientDet/configs/d0_xy.py
new file mode 100644
index 00000000000..c2afcd1e287
--- /dev/null
+++ b/projects/EfficientDet/configs/d0_xy.py
@@ -0,0 +1,168 @@
+_base_ = [
+ 'mmdet::_base_/datasets/coco_detection.py',
+ 'mmdet::_base_/schedules/schedule_1x.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+ imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = 512
+batch_augments = [
+ dict(type='BatchFixedSizePad', size=(image_size, image_size))
+]
+norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth' # noqa
+model = dict(
+ type='EfficientDet',
+ data_preprocessor=dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=image_size,
+ batch_augments=batch_augments),
+ backbone=dict(
+ type='EfficientNet',
+ arch='b0',
+ drop_path_rate=0.2,
+ out_indices=(3, 4, 5),
+ frozen_stages=0,
+ conv_cfg=dict(type='Conv2dSamePadding'),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ init_cfg=dict(
+ type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
+ neck=dict(
+ type='BiFPN',
+ num_stages=3,
+ in_channels=[40, 112, 320],
+ out_channels=64,
+ start_level=0,
+ norm_cfg=norm_cfg),
+ bbox_head=dict(
+ type='EfficientDetSepBNHead',
+ num_classes=80,
+ num_ins=5,
+ in_channels=64,
+ feat_channels=64,
+ stacked_convs=3,
+ norm_cfg=norm_cfg,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[1.0, 0.5, 2.0],
+ strides=[8, 16, 32, 64, 128],
+ center_offset=0.5),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
+ # training and testing settings
+ train_cfg=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='PseudoSampler'), # Focal loss should use PseudoSampler
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ test_cfg=dict(
+ nms_pre=1000,
+ min_bbox_size=0,
+ score_thr=0.05,
+ nms=dict(
+ type='soft_nms',
+ iou_threshold=0.3,
+ sigma=0.5,
+ min_score=1e-3,
+ method='gaussian'),
+ max_per_img=100))
+
+# dataset settings
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='RandomResize',
+ scale=(image_size, image_size),
+ ratio_range=(0.1, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=(image_size, image_size)),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PackDetInputs')
+]
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='PackDetInputs',
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+ 'scale_factor'))
+]
+
+train_dataloader = dict(
+ batch_size=16, num_workers=16, dataset=dict(pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+optim_wrapper = dict(
+ optimizer=dict(lr=0.16, weight_decay=4e-5),
+ paramwise_cfg=dict(
+ norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
+ clip_grad=dict(max_norm=10, norm_type=2))
+
+# learning policy
+max_epochs = 300
+param_scheduler = [
+ dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
+ dict(
+ type='CosineAnnealingLR',
+ eta_min=0.0,
+ begin=1,
+ T_max=299,
+ end=300,
+ by_epoch=True,
+ convert_to_iter_based=True)
+]
+train_cfg = dict(max_epochs=max_epochs, val_interval=1)
+
+vis_backends = [
+ dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend')
+]
+visualizer = dict(
+ type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+
+default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
+custom_hooks = [
+ dict(
+ type='EMAHook',
+ ema_type='ExpMomentumEMA',
+ momentum=0.0002,
+ update_buffers=True,
+ priority=49)
+]
+# cudnn_benchmark=True can accelerate fix-size training
+env_cfg = dict(cudnn_benchmark=True)
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR,
+# USER SHOULD NOT CHANGE ITS VALUES.
+# base_batch_size = (8 GPUs) x (32 samples per GPU)
+auto_scale_lr = dict(base_batch_size=128)
diff --git a/projects/EfficientDet/configs/d3.py b/projects/EfficientDet/configs/d3.py
new file mode 100644
index 00000000000..e0c59956317
--- /dev/null
+++ b/projects/EfficientDet/configs/d3.py
@@ -0,0 +1,175 @@
+_base_ = [
+ 'mmdet::_base_/datasets/coco_detection.py',
+ 'mmdet::_base_/schedules/schedule_1x.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+ imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = 896
+batch_augments = [
+ dict(type='BatchFixedSizePad', size=(image_size, image_size))
+]
+dataset_type = 'Coco90Dataset'
+evaluate_type = 'Coco90Metric'
+norm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth' # noqa
+model = dict(
+ type='EfficientDet',
+ data_preprocessor=dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=image_size,
+ batch_augments=batch_augments),
+ backbone=dict(
+ type='EfficientNet',
+ arch='b3',
+ drop_path_rate=0.3,
+ out_indices=(3, 4, 5),
+ frozen_stages=0,
+ conv_cfg=dict(type='Conv2dSamePadding'),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ init_cfg=dict(
+ type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
+ neck=dict(
+ type='BiFPN',
+ num_stages=6,
+ in_channels=[48, 136, 384],
+ out_channels=160,
+ start_level=0,
+ norm_cfg=norm_cfg),
+ bbox_head=dict(
+ type='EfficientDetSepBNHead_Huber',
+ num_classes=90,
+ num_ins=5,
+ in_channels=160,
+ feat_channels=160,
+ stacked_convs=4,
+ norm_cfg=norm_cfg,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[1.0, 0.5, 2.0],
+ strides=[8, 16, 32, 64, 128],
+ center_offset=0.5),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=1.5,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='HuberLoss', beta=0.1, loss_weight=50)),
+ # training and testing settings
+ train_cfg=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='PseudoSampler'), # Focal loss should use PseudoSampler
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ test_cfg=dict(
+ nms_pre=1000,
+ min_bbox_size=0,
+ score_thr=0.05,
+ nms=dict(
+ type='soft_nms',
+ iou_threshold=0.3,
+ sigma=0.5,
+ min_score=1e-3,
+ method='gaussian'),
+ max_per_img=100))
+
+# dataset settings
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='RandomResize',
+ scale=(image_size, image_size),
+ ratio_range=(0.1, 2.0),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=(image_size, image_size)),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PackDetInputs')
+]
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='PackDetInputs',
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+ 'scale_factor'))
+]
+
+train_dataloader = dict(
+ batch_size=16,
+ num_workers=16,
+ dataset=dict(type=dataset_type, pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(type='Coco90Metric')
+test_evaluator = val_evaluator
+
+optim_wrapper = dict(
+ optimizer=dict(lr=0.16, weight_decay=4e-5),
+ paramwise_cfg=dict(
+ norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True),
+ clip_grad=dict(max_norm=10, norm_type=2))
+
+# learning policy
+max_epochs = 300
+param_scheduler = [
+ dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),
+ dict(
+ type='CosineAnnealingLR',
+ eta_min=0.0,
+ begin=1,
+ T_max=299,
+ end=300,
+ by_epoch=True,
+ convert_to_iter_based=True)
+]
+train_cfg = dict(max_epochs=max_epochs, val_interval=1)
+
+vis_backends = [
+ dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend')
+]
+visualizer = dict(
+ type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+
+default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))
+custom_hooks = [
+ dict(
+ type='EMAHook',
+ ema_type='ExpMomentumEMA',
+ momentum=0.0002,
+ update_buffers=True,
+ priority=49)
+]
+# cudnn_benchmark=True can accelerate fix-size training
+env_cfg = dict(cudnn_benchmark=True)
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR,
+# USER SHOULD NOT CHANGE ITS VALUES.
+# base_batch_size = (8 GPUs) x (32 samples per GPU)
+auto_scale_lr = dict(base_batch_size=128)
diff --git a/projects/EfficientDet/configs/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py b/projects/EfficientDet/configs/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
new file mode 100644
index 00000000000..639b2181ba3
--- /dev/null
+++ b/projects/EfficientDet/configs/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
@@ -0,0 +1,241 @@
+_base_ = [
+ 'mmdet::_base_/datasets/coco_panoptic.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+    imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = (1024, 1024)
+batch_augments = [
+ dict(
+ type='BatchFixedSizePad',
+ size=image_size,
+ img_pad_value=0,
+ pad_mask=True,
+ mask_pad_value=0,
+ pad_seg=True,
+ seg_pad_value=255)
+]
+data_preprocessor = dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=32,
+ pad_mask=True,
+ mask_pad_value=0,
+ pad_seg=True,
+ seg_pad_value=255,
+ batch_augments=batch_augments)
+
+num_things_classes = 80
+num_stuff_classes = 53
+num_classes = num_things_classes + num_stuff_classes
+model = dict(
+ type='Mask2Former',
+ data_preprocessor=data_preprocessor,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=-1,
+ norm_cfg=dict(type='BN', requires_grad=False),
+ norm_eval=True,
+ style='pytorch',
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
+ panoptic_head=dict(
+ type='Mask2FormerHead',
+ in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside
+ strides=[4, 8, 16, 32],
+ feat_channels=256,
+ out_channels=256,
+ num_things_classes=num_things_classes,
+ num_stuff_classes=num_stuff_classes,
+ num_queries=100,
+ num_transformer_feat_level=3,
+ pixel_decoder=dict(
+ type='PixelBiFPNDecoder',
+ stages=6,
+ num_outs=3,
+ norm_cfg=dict(type='GN', num_groups=32),
+ act_cfg=dict(type='ReLU')),
+ enforce_decoder_input_project=False,
+ positional_encoding=dict(
+ type='SinePositionalEncoding', num_feats=128, normalize=True),
+ transformer_decoder=dict(
+ type='DetrTransformerDecoder',
+ return_intermediate=True,
+ num_layers=9,
+ transformerlayers=dict(
+ type='DetrTransformerDecoderLayer',
+ attn_cfgs=dict(
+ type='MultiheadAttention',
+ embed_dims=256,
+ num_heads=8,
+ attn_drop=0.0,
+ proj_drop=0.0,
+ dropout_layer=None,
+ batch_first=False),
+ ffn_cfgs=dict(
+ embed_dims=256,
+ feedforward_channels=2048,
+ num_fcs=2,
+ act_cfg=dict(type='ReLU', inplace=True),
+ ffn_drop=0.0,
+ dropout_layer=None,
+ add_identity=True),
+ feedforward_channels=2048,
+ operation_order=('cross_attn', 'norm', 'self_attn', 'norm',
+ 'ffn', 'norm')),
+ init_cfg=None),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ loss_weight=2.0,
+ reduction='mean',
+ class_weight=[1.0] * num_classes + [0.1]),
+ loss_mask=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ reduction='mean',
+ loss_weight=5.0),
+ loss_dice=dict(
+ type='DiceLoss',
+ use_sigmoid=True,
+ activate=True,
+ reduction='mean',
+ naive_dice=True,
+ eps=1.0,
+ loss_weight=5.0)),
+ panoptic_fusion_head=dict(
+ type='MaskFormerFusionHead',
+ num_things_classes=num_things_classes,
+ num_stuff_classes=num_stuff_classes,
+ loss_panoptic=None,
+ init_cfg=None),
+ train_cfg=dict(
+ num_points=12544,
+ oversample_ratio=3.0,
+ importance_sample_ratio=0.75,
+ assigner=dict(
+ type='HungarianAssigner',
+ match_costs=[
+ dict(type='ClassificationCost', weight=2.0),
+ dict(
+ type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),
+ dict(type='DiceCost', weight=5.0, pred_act=True, eps=1.0)
+ ]),
+ sampler=dict(type='MaskPseudoSampler')),
+ test_cfg=dict(
+ panoptic_on=True,
+ # For now, the dataset does not support
+ # evaluating semantic segmentation metric.
+ semantic_on=False,
+ instance_on=True,
+ # max_per_image is for instance segmentation.
+ max_per_image=100,
+ iou_thr=0.8,
+ # In Mask2Former's panoptic postprocessing,
+ # it will filter mask area where score is less than 0.5 .
+ filter_low_score=True),
+ init_cfg=None)
+
+# dataset settings
+data_root = 'data/coco/'
+train_pipeline = [
+ dict(type='LoadImageFromFile', to_float32=True),
+ dict(
+ type='LoadPanopticAnnotations',
+ with_bbox=True,
+ with_mask=True,
+ with_seg=True),
+ dict(type='RandomFlip', prob=0.5),
+ # large scale jittering
+ dict(
+ type='RandomResize',
+ scale=image_size,
+ ratio_range=(0.1, 2.0),
+ keep_ratio=True),
+ dict(
+ type='RandomCrop',
+ crop_size=image_size,
+ crop_type='absolute',
+ recompute_bbox=True,
+ allow_negative_crop=True),
+ dict(type='PackDetInputs')
+]
+
+train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
+
+val_evaluator = [
+ dict(
+ type='CocoPanopticMetric',
+ ann_file=data_root + 'annotations/panoptic_val2017.json',
+ seg_prefix=data_root + 'annotations/panoptic_val2017/',
+ ),
+ dict(
+ type='CocoMetric',
+ ann_file=data_root + 'annotations/instances_val2017.json',
+ metric=['bbox', 'segm'],
+ )
+]
+test_evaluator = val_evaluator
+
+# optimizer
+embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
+optim_wrapper = dict(
+ type='OptimWrapper',
+ optimizer=dict(
+ type='AdamW',
+ lr=0.0001,
+ weight_decay=0.05,
+ eps=1e-8,
+ betas=(0.9, 0.999)),
+ paramwise_cfg=dict(
+ custom_keys={
+ 'backbone': dict(lr_mult=0.1, decay_mult=1.0),
+ 'query_embed': embed_multi,
+ 'query_feat': embed_multi,
+ 'level_embed': embed_multi,
+ },
+ norm_decay_mult=0.0),
+ clip_grad=dict(max_norm=0.01, norm_type=2))
+
+# learning policy
+max_iters = 368750
+param_scheduler = dict(
+ type='MultiStepLR',
+ begin=0,
+ end=max_iters,
+ by_epoch=False,
+ milestones=[327778, 355092],
+ gamma=0.1)
+
+# Before 365001th iteration, we do evaluation every 5000 iterations.
+# After 365000th iteration, we do evaluation every 368750 iterations,
+# which means that we do evaluation at the end of training.
+interval = 5000
+dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
+train_cfg = dict(
+ type='IterBasedTrainLoop',
+ max_iters=max_iters,
+ val_interval=interval,
+ dynamic_intervals=dynamic_intervals)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+
+default_hooks = dict(
+ checkpoint=dict(
+ type='CheckpointHook',
+ by_epoch=False,
+ save_last=True,
+ max_keep_ckpts=3,
+ interval=interval))
+log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False)
+
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+# or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
diff --git a/projects/EfficientDet/configs/mask_fpn.py b/projects/EfficientDet/configs/mask_fpn.py
new file mode 100644
index 00000000000..8358309fa06
--- /dev/null
+++ b/projects/EfficientDet/configs/mask_fpn.py
@@ -0,0 +1,240 @@
+_base_ = [
+ 'mmdet::_base_/datasets/coco_panoptic.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+    imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = (1024, 1024)
+batch_augments = [
+ dict(
+ type='BatchFixedSizePad',
+ size=image_size,
+ img_pad_value=0,
+ pad_mask=True,
+ mask_pad_value=0,
+ pad_seg=True,
+ seg_pad_value=255)
+]
+data_preprocessor = dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=32,
+ pad_mask=True,
+ mask_pad_value=0,
+ pad_seg=True,
+ seg_pad_value=255,
+ batch_augments=batch_augments)
+
+num_things_classes = 80
+num_stuff_classes = 53
+num_classes = num_things_classes + num_stuff_classes
+model = dict(
+ type='Mask2Former',
+ data_preprocessor=data_preprocessor,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=-1,
+ norm_cfg=dict(type='BN', requires_grad=False),
+ norm_eval=True,
+ style='pytorch',
+ init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
+ panoptic_head=dict(
+ type='Mask2FormerHead',
+ in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside
+ strides=[4, 8, 16, 32],
+ feat_channels=256,
+ out_channels=256,
+ num_things_classes=num_things_classes,
+ num_stuff_classes=num_stuff_classes,
+ num_queries=100,
+ num_transformer_feat_level=3,
+ pixel_decoder=dict(
+ type='PixelFPNDecoder',
+ num_outs=3,
+ norm_cfg=dict(type='GN', num_groups=32),
+ act_cfg=dict(type='ReLU')),
+ enforce_decoder_input_project=False,
+ positional_encoding=dict(
+ type='SinePositionalEncoding', num_feats=128, normalize=True),
+ transformer_decoder=dict(
+ type='DetrTransformerDecoder',
+ return_intermediate=True,
+ num_layers=9,
+ transformerlayers=dict(
+ type='DetrTransformerDecoderLayer',
+ attn_cfgs=dict(
+ type='MultiheadAttention',
+ embed_dims=256,
+ num_heads=8,
+ attn_drop=0.0,
+ proj_drop=0.0,
+ dropout_layer=None,
+ batch_first=False),
+ ffn_cfgs=dict(
+ embed_dims=256,
+ feedforward_channels=2048,
+ num_fcs=2,
+ act_cfg=dict(type='ReLU', inplace=True),
+ ffn_drop=0.0,
+ dropout_layer=None,
+ add_identity=True),
+ feedforward_channels=2048,
+ operation_order=('cross_attn', 'norm', 'self_attn', 'norm',
+ 'ffn', 'norm')),
+ init_cfg=None),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ loss_weight=2.0,
+ reduction='mean',
+ class_weight=[1.0] * num_classes + [0.1]),
+ loss_mask=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ reduction='mean',
+ loss_weight=5.0),
+ loss_dice=dict(
+ type='DiceLoss',
+ use_sigmoid=True,
+ activate=True,
+ reduction='mean',
+ naive_dice=True,
+ eps=1.0,
+ loss_weight=5.0)),
+ panoptic_fusion_head=dict(
+ type='MaskFormerFusionHead',
+ num_things_classes=num_things_classes,
+ num_stuff_classes=num_stuff_classes,
+ loss_panoptic=None,
+ init_cfg=None),
+ train_cfg=dict(
+ num_points=12544,
+ oversample_ratio=3.0,
+ importance_sample_ratio=0.75,
+ assigner=dict(
+ type='HungarianAssigner',
+ match_costs=[
+ dict(type='ClassificationCost', weight=2.0),
+ dict(
+ type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),
+ dict(type='DiceCost', weight=5.0, pred_act=True, eps=1.0)
+ ]),
+ sampler=dict(type='MaskPseudoSampler')),
+ test_cfg=dict(
+ panoptic_on=True,
+ # For now, the dataset does not support
+ # evaluating semantic segmentation metric.
+ semantic_on=False,
+ instance_on=True,
+ # max_per_image is for instance segmentation.
+ max_per_image=100,
+ iou_thr=0.8,
+ # In Mask2Former's panoptic postprocessing,
+ # it will filter mask area where score is less than 0.5 .
+ filter_low_score=True),
+ init_cfg=None)
+
+# dataset settings
+data_root = 'data/coco/'
+train_pipeline = [
+ dict(type='LoadImageFromFile', to_float32=True),
+ dict(
+ type='LoadPanopticAnnotations',
+ with_bbox=True,
+ with_mask=True,
+ with_seg=True),
+ dict(type='RandomFlip', prob=0.5),
+ # large scale jittering
+ dict(
+ type='RandomResize',
+ scale=image_size,
+ ratio_range=(0.1, 2.0),
+ keep_ratio=True),
+ dict(
+ type='RandomCrop',
+ crop_size=image_size,
+ crop_type='absolute',
+ recompute_bbox=True,
+ allow_negative_crop=True),
+ dict(type='PackDetInputs')
+]
+
+train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
+
+val_evaluator = [
+ dict(
+ type='CocoPanopticMetric',
+ ann_file=data_root + 'annotations/panoptic_val2017.json',
+ seg_prefix=data_root + 'annotations/panoptic_val2017/',
+ ),
+ dict(
+ type='CocoMetric',
+ ann_file=data_root + 'annotations/instances_val2017.json',
+ metric=['bbox', 'segm'],
+ )
+]
+test_evaluator = val_evaluator
+
+# optimizer
+embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
+optim_wrapper = dict(
+ type='OptimWrapper',
+ optimizer=dict(
+ type='AdamW',
+ lr=0.0001,
+ weight_decay=0.05,
+ eps=1e-8,
+ betas=(0.9, 0.999)),
+ paramwise_cfg=dict(
+ custom_keys={
+ 'backbone': dict(lr_mult=0.1, decay_mult=1.0),
+ 'query_embed': embed_multi,
+ 'query_feat': embed_multi,
+ 'level_embed': embed_multi,
+ },
+ norm_decay_mult=0.0),
+ clip_grad=dict(max_norm=0.01, norm_type=2))
+
+# learning policy
+max_iters = 368750
+param_scheduler = dict(
+ type='MultiStepLR',
+ begin=0,
+ end=max_iters,
+ by_epoch=False,
+ milestones=[327778, 355092],
+ gamma=0.1)
+
+# Before 365001th iteration, we do evaluation every 5000 iterations.
+# After 365000th iteration, we do evaluation every 368750 iterations,
+# which means that we do evaluation at the end of training.
+interval = 5000
+dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
+train_cfg = dict(
+ type='IterBasedTrainLoop',
+ max_iters=max_iters,
+ val_interval=interval,
+ dynamic_intervals=dynamic_intervals)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+
+default_hooks = dict(
+ checkpoint=dict(
+ type='CheckpointHook',
+ by_epoch=False,
+ save_last=True,
+ max_keep_ckpts=3,
+ interval=interval))
+log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False)
+
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+# or not by default.
+# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=16)
diff --git a/projects/EfficientDet/configs/retinanet.py b/projects/EfficientDet/configs/retinanet.py
new file mode 100644
index 00000000000..f4b1102e50e
--- /dev/null
+++ b/projects/EfficientDet/configs/retinanet.py
@@ -0,0 +1,119 @@
+_base_ = [
+ 'mmdet::_base_/models/retinanet_r50_fpn.py',
+ 'mmdet::_base_/datasets/coco_detection.py',
+ 'mmdet::_base_/schedules/schedule_1x.py',
+ 'mmdet::_base_/default_runtime.py'
+]
+custom_imports = dict(
+ imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)
+
+image_size = 896
+batch_augments = [
+ dict(type='BatchFixedSizePad', size=(image_size, image_size))
+]
+norm_cfg = dict(type='BN', requires_grad=True)
+
+checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
+model = dict(
+ data_preprocessor=dict(
+ type='DetDataPreprocessor',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ bgr_to_rgb=True,
+ pad_size_divisor=128,
+ batch_augments=batch_augments),
+ backbone=dict(
+ _delete_=True,
+ type='EfficientNet',
+ arch='b3',
+ drop_path_rate=0.2,
+ out_indices=(3, 4, 5),
+ frozen_stages=0,
+ # conv_cfg=dict(type='Conv2dSamePadding'),
+ norm_cfg=dict(
+ type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
+ norm_eval=False,
+ init_cfg=dict(
+ type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
+ neck=dict(
+ _delete_=True,
+ type='BiFPN',
+ num_stages=6,
+ in_channels=[48, 136, 384],
+ out_channels=160,
+ start_level=0,
+ norm_cfg=norm_cfg),
+ bbox_head=dict(
+ type='RetinaSepBNHead',
+ in_channels=160,
+ feat_channels=160,
+ num_ins=5,
+ norm_cfg=norm_cfg),
+ # training and testing settings
+ train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
+
+# dataset settings
+train_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='RandomResize',
+ scale=(image_size, image_size),
+ ratio_range=(0.8, 1.2),
+ keep_ratio=True),
+ dict(type='RandomCrop', crop_size=(image_size, image_size)),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PackDetInputs')
+]
+test_pipeline = [
+ dict(
+ type='LoadImageFromFile',
+ file_client_args={{_base_.file_client_args}}),
+ dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(
+ type='PackDetInputs',
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+ 'scale_factor'))
+]
+
+train_dataloader = dict(
+ batch_size=16, num_workers=16, dataset=dict(pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+test_dataloader = val_dataloader
+
+optim_wrapper = dict(
+ optimizer=dict(lr=0.16),
+ paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
+
+# learning policy
+max_epochs = 12
+param_scheduler = [
+ dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=300),
+ dict(
+ type='MultiStepLR',
+ begin=0,
+ end=12,
+ by_epoch=True,
+ milestones=[8, 11],
+ gamma=0.1)
+]
+train_cfg = dict(max_epochs=max_epochs)
+
+vis_backends = [
+ dict(type='LocalVisBackend'),
+ dict(type='TensorboardVisBackend')
+]
+visualizer = dict(
+ type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+
+default_hooks = dict(checkpoint=dict(type='CheckpointHook'))
+# cudnn_benchmark=True can accelerate fix-size training
+env_cfg = dict(cudnn_benchmark=True)
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR,
+# USER SHOULD NOT CHANGE ITS VALUES.
+# base_batch_size = (8 GPUs) x (16 samples per GPU)
+auto_scale_lr = dict(base_batch_size=128)
diff --git a/projects/EfficientDet/convert_tf_to_pt.py b/projects/EfficientDet/convert_tf_to_pt.py
deleted file mode 100644
index 6132a6ba241..00000000000
--- a/projects/EfficientDet/convert_tf_to_pt.py
+++ /dev/null
@@ -1,627 +0,0 @@
-import argparse
-
-import numpy as np
-import torch
-from tensorflow.python.training import py_checkpoint_reader
-
-torch.set_printoptions(precision=20)
-
-
-def tf2pth(v):
- if v.ndim == 4:
- return np.ascontiguousarray(v.transpose(3, 2, 0, 1))
- elif v.ndim == 2:
- return np.ascontiguousarray(v.transpose())
- return v
-
-
-def convert_key(model_name, bifpn_repeats, weights):
-
- p6_w1 = [
- torch.tensor([-1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- p5_w1 = [
- torch.tensor([-1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- p4_w1 = [
- torch.tensor([-1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- p3_w1 = [
- torch.tensor([-1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- p4_w2 = [
- torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- p5_w2 = [
- torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- p6_w2 = [
- torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- p7_w2 = [
- torch.tensor([-1e4, -1e4], dtype=torch.float64)
- for _ in range(bifpn_repeats)
- ]
- idx2key = {
- 0: '1.0',
- 1: '2.0',
- 2: '2.1',
- 3: '3.0',
- 4: '3.1',
- 5: '4.0',
- 6: '4.1',
- 7: '4.2',
- 8: '4.3',
- 9: '4.4',
- 10: '4.5',
- 11: '5.0',
- 12: '5.1',
- 13: '5.2',
- 14: '5.3',
- 15: '5.4'
- }
- m = dict()
- for k, v in weights.items():
-
- if 'Exponential' in k or 'global_step' in k:
- continue
-
- seg = k.split('/')
- if len(seg) == 1:
- continue
- if seg[2] == 'depthwise_conv2d':
- v = v.transpose(1, 0)
-
- if seg[0] == model_name:
- if seg[1] == 'stem':
- prefix = 'backbone.layers.0'
- mapping = {
- 'conv2d/kernel': 'conv.weight',
- 'tpu_batch_normalization/beta': 'bn.bias',
- 'tpu_batch_normalization/gamma': 'bn.weight',
- 'tpu_batch_normalization/moving_mean': 'bn.running_mean',
- 'tpu_batch_normalization/moving_variance':
- 'bn.running_var',
- }
- suffix = mapping['/'.join(seg[2:])]
- m[prefix + '.' + suffix] = v
-
- elif seg[1].startswith('blocks_'):
- idx = int(seg[1][7:])
- prefix = '.'.join(['backbone', 'layers', idx2key[idx]])
- base_mapping = {
- 'depthwise_conv2d/depthwise_kernel':
- 'depthwise_conv.conv.weight',
- 'se/conv2d/kernel': 'se.conv1.conv.weight',
- 'se/conv2d/bias': 'se.conv1.conv.bias',
- 'se/conv2d_1/kernel': 'se.conv2.conv.weight',
- 'se/conv2d_1/bias': 'se.conv2.conv.bias'
- }
- if idx == 0:
- mapping = {
- 'conv2d/kernel':
- 'linear_conv.conv.weight',
- 'tpu_batch_normalization/beta':
- 'depthwise_conv.bn.bias',
- 'tpu_batch_normalization/gamma':
- 'depthwise_conv.bn.weight',
- 'tpu_batch_normalization/moving_mean':
- 'depthwise_conv.bn.running_mean',
- 'tpu_batch_normalization/moving_variance':
- 'depthwise_conv.bn.running_var',
- 'tpu_batch_normalization_1/beta':
- 'linear_conv.bn.bias',
- 'tpu_batch_normalization_1/gamma':
- 'linear_conv.bn.weight',
- 'tpu_batch_normalization_1/moving_mean':
- 'linear_conv.bn.running_mean',
- 'tpu_batch_normalization_1/moving_variance':
- 'linear_conv.bn.running_var',
- }
- else:
- mapping = {
- 'depthwise_conv2d/depthwise_kernel':
- 'depthwise_conv.conv.weight',
- 'conv2d/kernel':
- 'expand_conv.conv.weight',
- 'conv2d_1/kernel':
- 'linear_conv.conv.weight',
- 'tpu_batch_normalization/beta':
- 'expand_conv.bn.bias',
- 'tpu_batch_normalization/gamma':
- 'expand_conv.bn.weight',
- 'tpu_batch_normalization/moving_mean':
- 'expand_conv.bn.running_mean',
- 'tpu_batch_normalization/moving_variance':
- 'expand_conv.bn.running_var',
- 'tpu_batch_normalization_1/beta':
- 'depthwise_conv.bn.bias',
- 'tpu_batch_normalization_1/gamma':
- 'depthwise_conv.bn.weight',
- 'tpu_batch_normalization_1/moving_mean':
- 'depthwise_conv.bn.running_mean',
- 'tpu_batch_normalization_1/moving_variance':
- 'depthwise_conv.bn.running_var',
- 'tpu_batch_normalization_2/beta':
- 'linear_conv.bn.bias',
- 'tpu_batch_normalization_2/gamma':
- 'linear_conv.bn.weight',
- 'tpu_batch_normalization_2/moving_mean':
- 'linear_conv.bn.running_mean',
- 'tpu_batch_normalization_2/moving_variance':
- 'linear_conv.bn.running_var',
- }
- mapping.update(base_mapping)
- suffix = mapping['/'.join(seg[2:])]
- m[prefix + '.' + suffix] = v
- elif seg[0] == 'resample_p6':
- prefix = 'neck.bifpn.0.p5_to_p6.0'
- mapping = {
- 'conv2d/kernel': 'down_conv.conv.weight',
- 'conv2d/bias': 'down_conv.conv.bias',
- 'bn/beta': 'bn.bias',
- 'bn/gamma': 'bn.weight',
- 'bn/moving_mean': 'bn.running_mean',
- 'bn/moving_variance': 'bn.running_var',
- }
- suffix = mapping['/'.join(seg[1:])]
- m[prefix + '.' + suffix] = v
- elif seg[0] == 'fpn_cells':
- fpn_idx = int(seg[1][5:])
- prefix = '.'.join(['neck', 'bifpn', str(fpn_idx)])
- fnode_id = int(seg[2][5])
- if fnode_id == 0:
- mapping = {
- 'op_after_combine5/conv/depthwise_kernel':
- 'conv6_up.depthwise_conv.conv.weight',
- 'op_after_combine5/conv/pointwise_kernel':
- 'conv6_up.pointwise_conv.conv.weight',
- 'op_after_combine5/conv/bias':
- 'conv6_up.pointwise_conv.conv.bias',
- 'op_after_combine5/bn/beta':
- 'conv6_up.bn.bias',
- 'op_after_combine5/bn/gamma':
- 'conv6_up.bn.weight',
- 'op_after_combine5/bn/moving_mean':
- 'conv6_up.bn.running_mean',
- 'op_after_combine5/bn/moving_variance':
- 'conv6_up.bn.running_var',
- }
- if seg[3] != 'WSM' and seg[3] != 'WSM_1':
- suffix = mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p6_w1[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p6_w1[fpn_idx][1] = v
- if torch.min(p6_w1[fpn_idx]) > -1e4:
- m[prefix + '.p6_w1'] = p6_w1[fpn_idx]
- elif fnode_id == 1:
- base_mapping = {
- 'op_after_combine6/conv/depthwise_kernel':
- 'conv5_up.depthwise_conv.conv.weight',
- 'op_after_combine6/conv/pointwise_kernel':
- 'conv5_up.pointwise_conv.conv.weight',
- 'op_after_combine6/conv/bias':
- 'conv5_up.pointwise_conv.conv.bias',
- 'op_after_combine6/bn/beta':
- 'conv5_up.bn.bias',
- 'op_after_combine6/bn/gamma':
- 'conv5_up.bn.weight',
- 'op_after_combine6/bn/moving_mean':
- 'conv5_up.bn.running_mean',
- 'op_after_combine6/bn/moving_variance':
- 'conv5_up.bn.running_var',
- }
- if fpn_idx == 0:
- mapping = {
- 'resample_0_2_6/conv2d/kernel':
- 'p5_down_channel.down_conv.conv.weight',
- 'resample_0_2_6/conv2d/bias':
- 'p5_down_channel.down_conv.conv.bias',
- 'resample_0_2_6/bn/beta':
- 'p5_down_channel.bn.bias',
- 'resample_0_2_6/bn/gamma':
- 'p5_down_channel.bn.weight',
- 'resample_0_2_6/bn/moving_mean':
- 'p5_down_channel.bn.running_mean',
- 'resample_0_2_6/bn/moving_variance':
- 'p5_down_channel.bn.running_var',
- }
- base_mapping.update(mapping)
- if seg[3] != 'WSM' and seg[3] != 'WSM_1':
- suffix = base_mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p5_w1[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p5_w1[fpn_idx][1] = v
- if torch.min(p5_w1[fpn_idx]) > -1e4:
- m[prefix + '.p5_w1'] = p5_w1[fpn_idx]
- elif fnode_id == 2:
- base_mapping = {
- 'op_after_combine7/conv/depthwise_kernel':
- 'conv4_up.depthwise_conv.conv.weight',
- 'op_after_combine7/conv/pointwise_kernel':
- 'conv4_up.pointwise_conv.conv.weight',
- 'op_after_combine7/conv/bias':
- 'conv4_up.pointwise_conv.conv.bias',
- 'op_after_combine7/bn/beta':
- 'conv4_up.bn.bias',
- 'op_after_combine7/bn/gamma':
- 'conv4_up.bn.weight',
- 'op_after_combine7/bn/moving_mean':
- 'conv4_up.bn.running_mean',
- 'op_after_combine7/bn/moving_variance':
- 'conv4_up.bn.running_var',
- }
- if fpn_idx == 0:
- mapping = {
- 'resample_0_1_7/conv2d/kernel':
- 'p4_down_channel.down_conv.conv.weight',
- 'resample_0_1_7/conv2d/bias':
- 'p4_down_channel.down_conv.conv.bias',
- 'resample_0_1_7/bn/beta':
- 'p4_down_channel.bn.bias',
- 'resample_0_1_7/bn/gamma':
- 'p4_down_channel.bn.weight',
- 'resample_0_1_7/bn/moving_mean':
- 'p4_down_channel.bn.running_mean',
- 'resample_0_1_7/bn/moving_variance':
- 'p4_down_channel.bn.running_var',
- }
- base_mapping.update(mapping)
- if seg[3] != 'WSM' and seg[3] != 'WSM_1':
- suffix = base_mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p4_w1[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p4_w1[fpn_idx][1] = v
- if torch.min(p4_w1[fpn_idx]) > -1e4:
- m[prefix + '.p4_w1'] = p4_w1[fpn_idx]
- elif fnode_id == 3:
-
- base_mapping = {
- 'op_after_combine8/conv/depthwise_kernel':
- 'conv3_up.depthwise_conv.conv.weight',
- 'op_after_combine8/conv/pointwise_kernel':
- 'conv3_up.pointwise_conv.conv.weight',
- 'op_after_combine8/conv/bias':
- 'conv3_up.pointwise_conv.conv.bias',
- 'op_after_combine8/bn/beta':
- 'conv3_up.bn.bias',
- 'op_after_combine8/bn/gamma':
- 'conv3_up.bn.weight',
- 'op_after_combine8/bn/moving_mean':
- 'conv3_up.bn.running_mean',
- 'op_after_combine8/bn/moving_variance':
- 'conv3_up.bn.running_var',
- }
- if fpn_idx == 0:
- mapping = {
- 'resample_0_0_8/conv2d/kernel':
- 'p3_down_channel.down_conv.conv.weight',
- 'resample_0_0_8/conv2d/bias':
- 'p3_down_channel.down_conv.conv.bias',
- 'resample_0_0_8/bn/beta':
- 'p3_down_channel.bn.bias',
- 'resample_0_0_8/bn/gamma':
- 'p3_down_channel.bn.weight',
- 'resample_0_0_8/bn/moving_mean':
- 'p3_down_channel.bn.running_mean',
- 'resample_0_0_8/bn/moving_variance':
- 'p3_down_channel.bn.running_var',
- }
- base_mapping.update(mapping)
- if seg[3] != 'WSM' and seg[3] != 'WSM_1':
- suffix = base_mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p3_w1[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p3_w1[fpn_idx][1] = v
- if torch.min(p3_w1[fpn_idx]) > -1e4:
- m[prefix + '.p3_w1'] = p3_w1[fpn_idx]
- elif fnode_id == 4:
- base_mapping = {
- 'op_after_combine9/conv/depthwise_kernel':
- 'conv4_down.depthwise_conv.conv.weight',
- 'op_after_combine9/conv/pointwise_kernel':
- 'conv4_down.pointwise_conv.conv.weight',
- 'op_after_combine9/conv/bias':
- 'conv4_down.pointwise_conv.conv.bias',
- 'op_after_combine9/bn/beta':
- 'conv4_down.bn.bias',
- 'op_after_combine9/bn/gamma':
- 'conv4_down.bn.weight',
- 'op_after_combine9/bn/moving_mean':
- 'conv4_down.bn.running_mean',
- 'op_after_combine9/bn/moving_variance':
- 'conv4_down.bn.running_var',
- }
- if fpn_idx == 0:
- mapping = {
- 'resample_0_1_9/conv2d/kernel':
- 'p4_level_connection.down_conv.conv.weight',
- 'resample_0_1_9/conv2d/bias':
- 'p4_level_connection.down_conv.conv.bias',
- 'resample_0_1_9/bn/beta':
- 'p4_level_connection.bn.bias',
- 'resample_0_1_9/bn/gamma':
- 'p4_level_connection.bn.weight',
- 'resample_0_1_9/bn/moving_mean':
- 'p4_level_connection.bn.running_mean',
- 'resample_0_1_9/bn/moving_variance':
- 'p4_level_connection.bn.running_var',
- }
- base_mapping.update(mapping)
- if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
- suffix = base_mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p4_w2[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p4_w2[fpn_idx][1] = v
- elif seg[3] == 'WSM_2':
- p4_w2[fpn_idx][2] = v
- if torch.min(p4_w2[fpn_idx]) > -1e4:
- m[prefix + '.p4_w2'] = p4_w2[fpn_idx]
- elif fnode_id == 5:
- base_mapping = {
- 'op_after_combine10/conv/depthwise_kernel':
- 'conv5_down.depthwise_conv.conv.weight',
- 'op_after_combine10/conv/pointwise_kernel':
- 'conv5_down.pointwise_conv.conv.weight',
- 'op_after_combine10/conv/bias':
- 'conv5_down.pointwise_conv.conv.bias',
- 'op_after_combine10/bn/beta':
- 'conv5_down.bn.bias',
- 'op_after_combine10/bn/gamma':
- 'conv5_down.bn.weight',
- 'op_after_combine10/bn/moving_mean':
- 'conv5_down.bn.running_mean',
- 'op_after_combine10/bn/moving_variance':
- 'conv5_down.bn.running_var',
- }
- if fpn_idx == 0:
- mapping = {
- 'resample_0_2_10/conv2d/kernel':
- 'p5_level_connection.down_conv.conv.weight',
- 'resample_0_2_10/conv2d/bias':
- 'p5_level_connection.down_conv.conv.bias',
- 'resample_0_2_10/bn/beta':
- 'p5_level_connection.bn.bias',
- 'resample_0_2_10/bn/gamma':
- 'p5_level_connection.bn.weight',
- 'resample_0_2_10/bn/moving_mean':
- 'p5_level_connection.bn.running_mean',
- 'resample_0_2_10/bn/moving_variance':
- 'p5_level_connection.bn.running_var',
- }
- base_mapping.update(mapping)
- if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
- suffix = base_mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p5_w2[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p5_w2[fpn_idx][1] = v
- elif seg[3] == 'WSM_2':
- p5_w2[fpn_idx][2] = v
- if torch.min(p5_w2[fpn_idx]) > -1e4:
- m[prefix + '.p5_w2'] = p5_w2[fpn_idx]
- elif fnode_id == 6:
- base_mapping = {
- 'op_after_combine11/conv/depthwise_kernel':
- 'conv6_down.depthwise_conv.conv.weight',
- 'op_after_combine11/conv/pointwise_kernel':
- 'conv6_down.pointwise_conv.conv.weight',
- 'op_after_combine11/conv/bias':
- 'conv6_down.pointwise_conv.conv.bias',
- 'op_after_combine11/bn/beta':
- 'conv6_down.bn.bias',
- 'op_after_combine11/bn/gamma':
- 'conv6_down.bn.weight',
- 'op_after_combine11/bn/moving_mean':
- 'conv6_down.bn.running_mean',
- 'op_after_combine11/bn/moving_variance':
- 'conv6_down.bn.running_var',
- }
- if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
- suffix = base_mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p6_w2[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p6_w2[fpn_idx][1] = v
- elif seg[3] == 'WSM_2':
- p6_w2[fpn_idx][2] = v
- if torch.min(p6_w2[fpn_idx]) > -1e4:
- m[prefix + '.p6_w2'] = p6_w2[fpn_idx]
- elif fnode_id == 7:
- base_mapping = {
- 'op_after_combine12/conv/depthwise_kernel':
- 'conv7_down.depthwise_conv.conv.weight',
- 'op_after_combine12/conv/pointwise_kernel':
- 'conv7_down.pointwise_conv.conv.weight',
- 'op_after_combine12/conv/bias':
- 'conv7_down.pointwise_conv.conv.bias',
- 'op_after_combine12/bn/beta':
- 'conv7_down.bn.bias',
- 'op_after_combine12/bn/gamma':
- 'conv7_down.bn.weight',
- 'op_after_combine12/bn/moving_mean':
- 'conv7_down.bn.running_mean',
- 'op_after_combine12/bn/moving_variance':
- 'conv7_down.bn.running_var',
- }
- if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
- suffix = base_mapping['/'.join(seg[3:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[3] == 'WSM':
- p7_w2[fpn_idx][0] = v
- elif seg[3] == 'WSM_1':
- p7_w2[fpn_idx][1] = v
- if torch.min(p7_w2[fpn_idx]) > -1e4:
- m[prefix + '.p7_w2'] = p7_w2[fpn_idx]
- elif seg[0] == 'box_net':
- if 'box-predict' in seg[1]:
- prefix = '.'.join(['bbox_head', 'reg_header'])
- base_mapping = {
- 'depthwise_kernel': 'depthwise_conv.conv.weight',
- 'pointwise_kernel': 'pointwise_conv.conv.weight',
- 'bias': 'pointwise_conv.conv.bias'
- }
- suffix = base_mapping['/'.join(seg[2:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif 'bn' in seg[1]:
- bbox_conv_idx = int(seg[1][4])
- bbox_bn_idx = int(seg[1][9]) - 3
- prefix = '.'.join([
- 'bbox_head', 'reg_bn_list',
- str(bbox_conv_idx),
- str(bbox_bn_idx)
- ])
- base_mapping = {
- 'beta': 'bias',
- 'gamma': 'weight',
- 'moving_mean': 'running_mean',
- 'moving_variance': 'running_var'
- }
- suffix = base_mapping['/'.join(seg[2:])]
- m[prefix + '.' + suffix] = v
- else:
- bbox_conv_idx = int(seg[1][4])
- prefix = '.'.join(
- ['bbox_head', 'reg_conv_list',
- str(bbox_conv_idx)])
- base_mapping = {
- 'depthwise_kernel': 'depthwise_conv.conv.weight',
- 'pointwise_kernel': 'pointwise_conv.conv.weight',
- 'bias': 'pointwise_conv.conv.bias'
- }
- suffix = base_mapping['/'.join(seg[2:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif seg[0] == 'class_net':
- if 'class-predict' in seg[1]:
- prefix = '.'.join(['bbox_head', 'cls_header'])
- base_mapping = {
- 'depthwise_kernel': 'depthwise_conv.conv.weight',
- 'pointwise_kernel': 'pointwise_conv.conv.weight',
- 'bias': 'pointwise_conv.conv.bias'
- }
- suffix = base_mapping['/'.join(seg[2:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- elif 'bn' in seg[1]:
- cls_conv_idx = int(seg[1][6])
- cls_bn_idx = int(seg[1][11]) - 3
- prefix = '.'.join([
- 'bbox_head', 'cls_bn_list',
- str(cls_conv_idx),
- str(cls_bn_idx)
- ])
- base_mapping = {
- 'beta': 'bias',
- 'gamma': 'weight',
- 'moving_mean': 'running_mean',
- 'moving_variance': 'running_var'
- }
- suffix = base_mapping['/'.join(seg[2:])]
- m[prefix + '.' + suffix] = v
- else:
- cls_conv_idx = int(seg[1][6])
- prefix = '.'.join(
- ['bbox_head', 'cls_conv_list',
- str(cls_conv_idx)])
- base_mapping = {
- 'depthwise_kernel': 'depthwise_conv.conv.weight',
- 'pointwise_kernel': 'pointwise_conv.conv.weight',
- 'bias': 'pointwise_conv.conv.bias'
- }
- suffix = base_mapping['/'.join(seg[2:])]
- if 'depthwise_conv' in suffix:
- v = v.transpose(1, 0)
- m[prefix + '.' + suffix] = v
- return m
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='convert efficientdet weight from tensorflow to pytorch')
- parser.add_argument(
- '--backbone',
- type=str,
- help='efficientnet model name, like efficientnet-b0')
- parser.add_argument(
- '--tensorflow_weight',
- type=str,
- help='efficientdet tensorflow weight name, like efficientdet-d0/model')
- parser.add_argument(
- '--out_weight',
- type=str,
- help='efficientdet pytorch weight name like demo.pth')
- args = parser.parse_args()
- return args
-
-
-def main():
- args = parse_args()
- model_name = args.backbone
- ori_weight_name = args.tensorflow_weight
- out_name = args.out_weight
-
- repeat_map = {
- 0: 3,
- 1: 4,
- 2: 5,
- 3: 6,
- 4: 7,
- 5: 7,
- 6: 8,
- 7: 8,
- }
-
- reader = py_checkpoint_reader.NewCheckpointReader(ori_weight_name)
- weights = {
- n: torch.as_tensor(tf2pth(reader.get_tensor(n)))
- for (n, _) in reader.get_variable_to_shape_map().items()
- }
- print(weights.keys())
- bifpn_repeats = repeat_map[int(model_name[14])]
- out = convert_key(model_name, bifpn_repeats, weights)
- result = {'state_dict': out}
- torch.save(result, out_name)
-
-
-if __name__ == '__main__':
- main()
diff --git a/projects/EfficientDet/efficientdet/__init__.py b/projects/EfficientDet/efficientdet/__init__.py
index dca95d53a35..8bf4981ccce 100644
--- a/projects/EfficientDet/efficientdet/__init__.py
+++ b/projects/EfficientDet/efficientdet/__init__.py
@@ -4,11 +4,15 @@
from .coco_90metric import Coco90Metric
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
+from .efficientdet_head_huber import EfficientDetSepBNHead_Huber
+from .huber_loss import HuberLoss
from .trans_max_iou_assigner import TransMaxIoUAssigner
+from .utils import Conv2dSamePadding
from .yxyx_bbox_coder import YXYXDeltaXYWHBBoxCoder
__all__ = [
'EfficientDet', 'BiFPN', 'EfficientDetSepBNHead', 'YXYXAnchorGenerator',
'YXYXDeltaXYWHBBoxCoder', 'Coco90Dataset', 'Coco90Metric',
- 'TransMaxIoUAssigner'
+ 'TransMaxIoUAssigner', 'Conv2dSamePadding', 'HuberLoss',
+ 'EfficientDetSepBNHead_Huber'
]
diff --git a/projects/EfficientDet/efficientdet/__pycache__/__init__.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1cf3a87d682a8042e0122be26cc9b9014026b5d
GIT binary patch
literal 868
zcmZuvO^?z*7;b^`*)DWh!02@^TN55<+bZ8gdzKM_-TP#0JM?_}bWTHK4z^i1`z(?vwpTPU6qUvo(`r$plu1eH
zH!JaBkMR3GP;nOQpi&{%Y4Bys;>~9fZ}%)v3K|e$7U?+HCHf=KG&mh-a9VyqX%t5p
drB8QuxK)Ix9#!H!*sWk
zI2xaJIr0rSBo`#U3kPmUeL~{2Ux5q%svbM*EDJZoAhk$
ze?C=dStOZu_h!?KYxm0n=DR(ry**yYQUJGcyBLg1@i8yBNR$x;d$;fYFaC1x)6edr
z!Y8FjPs&0i*$3q@mjkrE8Vz9HhezH6!bwCKi7eIxZyoNUuiS%QgZtdQMsUWgA6;fM
z=b;-lc`#`)(1KNmJ4V|9S(|r2&jUR?of~x-g;NFkxv#(a`l~0r-R-%$d5}mR%k+e6
zPY#ldOYOl?(h}YSUMMbL&ecp5156JNNE`9s`4Ar2r-W2=;lkrpkeK_vcE6Qn{DN
zqG31>IKS{K=bp7_EPO>zt;#;5izcI0^98a9K+6_CR&HfMV#)VF@i>=A72q5t_59Rf
z7JS#LyuWESpQ_m|_Wld2mCx*|eTiHpRRg$|;g+&1=dvqgP
zlHu+?f}oK{f58|51LODi04p|2^PZ
z04{$f;$<-P=e_N`P}@_k1Z*a^PsT|xI^xB6mTc!aNb4Gz8>Y-Ag4m?vERt2jt^A}BJPaLR+2Go%CgI()Z
zK2%PyT*2ykywZe~Rz*(TDyUj#7T9P5*6fK_b*eT#BL^D=V6I!a%>1CP;0?88Un5nw
z=2t5yyFwNl)y63xnt5jy@MPkPUsSW&ctCLlklRQii;!ivZyC=-0C40hS7QbYl)@PLQYnF`C|qB%6X=1q75tK-I6@V_t@!xQnZx2>c&zL9k@oA|;>X+=JQ$}#6(6sd
zIcU|A;}tib!<~vUsatU9cv&i@lh))F*jHb@(DBxQ{uZoC^pre*8Jb+zzD}=OoAk1K
zi?(f#`qs&lJ~~);6F~y9Q|j
zuYLigPlS!+RU{_zjU^II1a)x7w8cFP^bYFaov}vnxS|PU10IQ~@Eg}@_hp9~Y3~{B
zL6V@P*7ibQ%wEQc=xuY&^uckS$-YYD{pBHK-*kt^7mDmZdBq6q&QZ6vAowVSw9W*WRT62j!##YXATM
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/bifpn.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/bifpn.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e70d9a360e90de5d67eb3fb4250a55e6f654e3f0
GIT binary patch
literal 6544
zcmb_h&2tpT6`!x&9gTKZpP&!0S=-pKHb^#-kc}~5ka3)VEE^mrYm@P?)4PnAnO%Bj
z!H`k8Dk3E=r0h#hIY^gO`I1Xc`8RUNp>Ih}RC3AHm9O9H*o>1o
zzv+Jcx^=NsvK0LO<>)oY6KSE}@WIS?1{M85{OimUocpw?8CXwBDZnk@Ces2P%{
z`?(-r%S+nu3xQcPC7tsJ0;^_8I`0n##ab~a)k>%rT+=THL$#q`xHcS&)J9b0UBw-6
zt%r(hF=KUfU9XKXYjb>y_=9c{{30{g^>wv&z}2oP)zTB#rc_m7T=AlK8AR^dJukWi
z!n){0>}t#H_zboD)%Ta)xpJ+liNUKKKlU!Q!iLurrT5!$N?yO;W=~X7ri;-f){bv|
z>_rT1cQ5*_`fV}1)Vdd5y5)o+^Ha|FRp;}SR?9!-UULGr;<&CCHtS?h?N@00ckbfo{AYc5(+)x^-9DS)~xh1#!
zP^k^NL+&uj;)Y@?#~PDdTMxdrb2%D#0%paCn{dp
z?!*<>3o4BkudK|SsI1K6S?Ism>cp8h@zRM3SUX-iRXJH%S?s?Nb^;qB=toLRiz*>w
zZl%GTxWic`aN1D?V*OX@aIsx)G%Lr?v~I4R`7He$OI|(xtOB!K5a_lYd#%v%t7j@{
zyX2GIeLXZ-($(@iBwO*F`;5<2mRQ5V2ahUoi(b8Z%6;2)pzUt&i_P>kge@L?UzU(A
z$M+sMd)czw3fXqoP92{9etZB`-rY>M_!4s9=~CF~kex8JMTfM
zxBXc#T@VE1RGP7*`IoN??c#Ft=i|S7_swo<5>dlq&2nt4w2qDa%6|gYw
z1VU%+sCn|z;lKUm^z`}VswoCD>r520Ye!fOk4Lc`djS&@PP^^jw{M0vd}L$TXP7LRT!t6~-Q9ys5-wH*ZGpwYrZg#^Al>>ONbb%
zd0q1{>7IP5KJ`?6`l+>#$SzJUVX*Q15daEk2TkF2c_>$;w#QO2y9)N^;_{RK;8)da
zR=W6UQS$>>ulyk3$-mL~`uo+B(a;^ajO?ZF6y)y7p85?C~6DKi@Y$3ov_Y?K_OF=-;N^2X>)pq^A_hLWXveR
z7{NF}-#|V=Rmx_>PpRLSoSIRw;)9p@_dt>U?
zPp2d3*B7&OPQg}~6s@W6l%kGPih4~c>QY!Fm%1pWQ~#xO+D}TSeTab-ha;Ut@E19J
z?)oiZAXA7$q2afjc%J33w*9s8
z%?GnV7|*s@jDa^qHySNAZ#mRLN?dKC!H
z-t-#naHf5qABAB@@I)FV`2&_(R`c0E%~UnjR1NuO?MWM%mT|S1sqJ`28&>6?UW5$(
zbPbYPU-$P;Ylb?hTG0O#?#X{JwtZTd>BWgUMIQQ5Nni33o`_-rHZCO^PLNh4>Xy0&
zL|9Xot6CZ)K86VBpP&%x=e&k8g-?i6sJDWfUdVorN|XmEaJ;2G7|oc_73o`W2vW4p
zqy@vUg)HDpiTbI&rmg8~NL*lx0b7uWY-&jk6cUt8J<&iRiP5ilP!1_;NQ5@&DJFw53CfKtpk%tUSxWL-
z>bNqYY>`KhC~cPU-Vo?gQj*$AyJ#aH_OuOTw2dUij5cW69EG+qXd6m~q_&}5w2f!9
znHg;dl5$2H^?4B5CZKI38IjsXcF{JO(N@T4n@WZ=+Q=>i$>DDyJ>B>5q14CV9Y*^z
z=xHn&lRb^?($lk~s>csB@65&V$gJ%Tq0<_NA3{D|Nz!92k-!Bv8{2o?xt30@^w
zA$XtQ9KmUV4+%aXI8U%haGl^9!3BcX34ToQ5y9I8Zvd)?WR&5bf)-kP4$nNEg&J*(
zUWj5bE#}fe(tUc7)oh@4DIwQmuETX6<&R
zCa=z6rMyW8CWFQb7E5ex&CtCq0nCJY&OlSSM4j~E1p$7w#B
zuDP9wwlI6w9()pt>b(m@D#_j02bJgofR6a9YBJ!ek5yHX$9OH)SB(v2L*39e^bKPp
z*T6B|)gPL1E>YLjC1qXx6&*?P@1%
ze*_(_a$Domt{Q5t_DFf8J~AFb|Kq|}+8qO4Go-t&U#=Fm2fXKD0-e*h#NkGpc~z)(
zHJ>?h$kWCMn&s&zJ(bAiMAPOQ>nt5$kT0~)9f9o;*_|&ucwT<8ekwC_7Pq#JADsiy
z6NhClkXA|sd7I!Bf)fCd<47@?$jN-MYNbJ@Ma|L>c`}l(NaJ>XHR;Fh)c($a$f-{j
z;IdnZ&}&*GIolzEJdG
F^}p>-PYM74
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/bifpn_no_bn.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/bifpn_no_bn.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..755c5433ad50f3fc5982771fda85b8d01c3e7df4
GIT binary patch
literal 9256
zcmb_i-E$j9a^G((mHWa%rZ`nx+5VU(W&yfI#RZtFq*`j0mx>0c?+e{>|?!5jV?fGfFjQ>xHcY0537qR0tO
zt*D|*Yv~oehcjBbBFR!KQ!xcbZDw0m#cEAerbJ$A=34nmUZnMAp*3BZ7HOk7)0(Z!
zwu+UaEWIze8P|L&xu&N!<{I<6YUKoQS=R#2^7O_@U>1Oxa&y4sdYDCE@@@f`LJzY9
z%(OcL%#4>un`LkM@YFM+KkF8OEhhDL<;rPSejt_So@2h!dRgY$hkh896>i;b2V3{r
zZnx=i{lOzY+(e;u%L%;%iJWoohj-rl@Bu1M-Rm|ZA1!Kro#%hpiIU{UPdZ+e2=^Zq
ziFfdZX8}B^BD+#W@l;oKm8TL0qam%jnn>%ej)56`PIUf3tJCyaUJyBv-wvt@+S5Y>
zHN3a+hNl5yIg$?LSbiqYOFK#|9V(QTrC0$mdA+Q1J!-S+CeMU^z18+zJ{vk)o?UA*
zd&8+deB>|}mDFnVn9Dvlm=|Jdzn0hxi2wGJORXTf)bS$bHEZFe?M)}BKk|aju5+o?
za=pl_(RASi6B_cr=;!tgQ(PrCaMRdUy
z2+$0U8O*0?_b7;+84r41uX4lRhL=@8At-uzl%FWD^@+1+O$df6r^R*^_i(#Wd2D
zAPOXsVkL-iePK+5PZ51`-RX3iPq1yY5MspT$4AFXGa+FPg_wg`lRE8-!8+Du5tZ2z
zfn@@M6O))gL{H%rgiV=IeuM=k5cGuW00S0aTPm)4AmgjKx@)+Z18mxw=Fv~5AJ}Am
zW$Z~cS!k<##Yl8iC9{VOS~0pGpt-}3G|
zuImSN(nJx|2Ul*b*Z*+-@8AFD?Wb?o|MS=X@lW?Q?!R4D*%=Iwo2@qKgl>zgUMGaq
zO!Z1~ToMa5vJYv==ovATVt0|LG)WzinXG7H
zcafpgp_-h>_uK8}RrdiDXKx=>NnYq7iu6w74gVG(l6JvJlIBQhkPItdg0;2=Q_4w37k3kjK
zp}*~M%Mbkk79*&7>^e17n_d7-Lh`~jht)%Fko~WR{Z$*4^$$=lBvF(gIAonIB`43&
z0`|V!FQnrbT7hxdCc}`ydmC@~4*;=D_S%(xt}IEPDm%)KD(tojhCuNyL1jM!dl$*B
zi#%8l8;+btr3=!Ime_ew4(5;4M;`T))*7)F_4QbDwPk6?fKoCRrJW3^Eac5tk4@yW
zk$z}EB#bx{n{l?TJyXQK%k*|2?K^$_`G0-$%{PN(Vc*A8$|jW1rUS*TwVY^^YgNy0
zvLB#?y#>JKyIh5yV{cMjYp_?jyvgN9JmWo%m}A2|OA=dLYkIYa>&&ljV(Ufi4mUR1
zQPgft+TDKus=|6pa#6jkTvkl%G5nNyg?x^Q%hTWO#*y7V*xd}Lgdt_pmkzO~zlWmt
zsaN4vD7hI%m@3}%q1{eTxCJn)nQpRvZ}7>W?@+^JLH6R$YZe<;JW*F@2@|I
zoVw?%LJ?S{-ZNw|Ze0ng
z;uH=VT`j#)x_={W5p-MRH-^31Vnn6D^W0L+b6|hHQp@RtB~(kB^o@4u!p-(Znt`DQ687kC1`~l)p99E!Is;JUP^jGGufR
z@JJjwgpt4W{}@TiM_LJzCJUo|Nbx6m)~aO_&^bm7s_4%
z_#b^#c9yEV3a~B|B+vAw1OGo4Rvj?3hDB*JC``5l5cs20qx;>btzU
zkX)W~F*W9I2x*2QO`SJx)qEC4cI3A_XwgpSH`~DxkB;Yt7n7TPZnD=g%qp2Ddgz<_
zz%x)8GI~k8+3ks6Za`5_!|mfiOKVH{>Bl)1(P^m1jg@M%3(MsFzvMP9JZFvZ5%{
zCYH^KGIL_t{4woL9MkS(f2`;0rBSa80LjB%!^}~m&P!q
zG0e+jm@{LTSE5|wZ1ig5oNEG}cdY}3EYAUP{sK5(7OVE!F|}S#Yh661)*ET9m1AmM
zN^4zCYh}j9btSEJ^_W`M(psy>)LKhxT~BL`bK*u?>wD4ncS(+K3ePl;fEoJ?+vM(Z
zctS(25RT*%4rq*GGl`d^9@&vN?&>a`cRxld&%DpOK2)Afk(ToB47^?THYmU)!rteF
zcf-(QWX#@Utjz`*fK{kcuGYD7-Sz?s;<)T5qFuG!dBQ%WLENOg^#J|g_zayV&WOTw
z;bv#ej@owTI#Sm~>IR?gT(jNwqx5VI#OhJR+9+ZUHJaWQoNwsSAPJ1|InW+M7W%Xj
z`@##TBsxp;*N^0H4Ci~NUG%bb-R^d{IOngWsns-fjZzlXOU9n0lfII4(od33`rz67
z4#Q~*rb-0DxCX-H>
zNp;`R9X$qpfszoXMugJ}IQ$st&r(c9!|~r33IWV3MRgQo9jP&v|F-&S4yR`PP%rrr
zh3_DxYVslubz=~7nvCC`8@SZthdc_iQOxl+vFie?CoG@D?!@w^Djo24p!*TV&=AnT
zIU7M*9Vwi(4^pjjNEnH`~-8>3l3-8s;l2hHhtTF{)HK=Z^9O>=~1VT>lmcz6;t7eI41o)t7_
zC(v9RqL~??IWtBRb81NNO@4zmODXS{6W#-N3gxHK-&{N=`kR~3-wQ+irE`d9h`-=F
z`6bffi*b=|Cwd&82j(SEE`joi_=KQ*VgluthbX5Uj|(G|&&1$yN*Vk)dW~38G%0`5E6JwV3$Bd;30t~fnN~#Ie{*L7J+>Ndj#mX
z&jJE}O5m3S9ua60_%i~(BJh|%2cUdvC?2A8u5qQaig!&|89%`1212^{TuVNy$!858
zF>P)<@sWp+{mnn3=J5W>moxVMdpUc
z2!k-N3KUm84TBKUdPs_az?k?!o95I4^z|ZiXhDAQ_!MF_2&whzEX#|>qza(8T^!QB
zI3S6OyTXwe4$s(d6{?8Hu3=GCmZ_RWJcXJ|h>ukcy0IRq>`zzLyF~hz4OGm+n6i%oUd_;%db;5dz8rYbCBY
zQn=iuAdm9Kv42S;K-f*YDxrwitZeOKv{R*J%Jz)0z$5!@ZLQr_FJjE@F10fJw$L(V3
zkMJpASd_Sj$NMG8F_n&{^w5_KEljKwz5_4_Y>UGuE@&DElkdrU%AUHX?df~QUZ!S}
zx_W9xdMxkCcX0RoCAsxUz*${Sqvj+rv~Y8H!>a&wRQv3;9o#bStNRF%;vPt0%dQ;Y
zmSPNNxxED(1WQTNHWN@DzfhpOLptMr#N7K6q@7X!-n>|h*y
z)s#D;ni7pf$|zAwUsK8?kRd=b{8ml$S5%3*o=3toNA=KIxj4(HM)?Ky9zNW_`4Ts)
zNq53Rn&PhzD?uREwKCVwuGG4L_$PzYL^vr;C)U$!wW?byRWza<$0+X23oTs3)_ue*
zDTdW#I*=70@t24d=G7@w^@I_B_*8;K*s2*OGo?9m)9FTSI_+}vZm+-OuG~lhlj0~N
zPI}^yDzqz~PDp#K_x}Jf+o5ZLbQ^H6L5Du`5l?&O2fPkpWZ#k)1_4Sw1A<-t6PCn6Bi
zNcXiyR_X(zk<*zsd~=X*lsL2p6bo%B}FD&0kQQ;pt6_t3Y|x6^mfchbFdA1%=l
z`YzPoPj9DXTA`!#06j?Wpzo&dp?A{v(!1y(dYB%eReCoaqxV4aQPtZT=wo!8PS8m@
zMUT^a=`=k--$zdZe~RjKhMuNp=q#PR}eUUB!!E9mi7v;?HupK^hbZXi0fhkk#grdeKU0!O)
z!q62_7|3GFYj3uFSLWJ|koF*qLy-ZNq;}~dyy8`ED!H*Nw8A*{-TK&G_X4-p
z^_?if%tk|L!W(^=?+0For#uYd3|MvDM;j-=U*?_R5RS1T%w^q!y$T@=+Xd1RVY?mr
zo-Dv=B8dD?uzGFS3$av61FtQ@
zbq5AAVI{1uICLWRxfyvuUsi+*7j#|WV4c-@!Xb>%_CsD%*Vsi?)b^qX3t?xn
z-^Vsjp+>?(Pj(!JajzvtQEw@i{x!luy1--=1i(7;WhONR2#zL-q(6Qu@J|Xzuk1?v
z!#;^8!KWN3mvUM2r&;X$9+07@ZUw%mfb>li=?x}#DL}Op?*lIyAa(Tm1ivVvB3>^2
z9Dp=tlp`G^o@ZW7fZJepzknh=!sv?`@J$AuVxXyjOsTIk`risje~l|SjNT-1mVxQ8
z8{pGVG5I72MhTeb`R_=4mVt*D$VN%O!aUnzN8-IZe0mKWRA<5TSD5@;2Jm3%gvmt(
zq<@McQ}?Gb;J3K)PZ(u&r)F0MN*3P^v5D}RvDsit)XesvE}
z(&8Kg+4RMgoyznZ;4uBSqezvfrhWV{OMHOQZYJ>)%)i1YH=fQ`@XXWSR6zP=u5_48
z`}j2T+=nQDV3Yxh^gT@8nE`*y{D(oNO@vwr%52jX21t`;sXBn7PK>7pB{~{AMCT9Bx3gxt^O|t=b
zUgG^qLVN;6`WXhMs;jyDC6q6+_gIMIV76TXlJ$OS<1YU2Q3gMGr5m&eUTHlm$cXdUfj}8Xj_byVtc!+TL$>A|KI*D;YyY%9Odo;JJNxq8_sj#R`3Q0aeD!7`BILkF;eMFxWmE>Gfysl5I
z#N0l>oDQk&`NTqt(yP#aQc7~$2a^)lTpd5heJs4Ji%NW3d?+!u^esKUjmy6AvL039
z!(>Ck_!*7LpG4yO0cd~OOmaxYD&rq}GIXfs1VI=(F%sXX))CW{
zC#qe(9JzjH8Eu+earv@s*QUR-$_dfKy8z>yR=+@AJHGUAZ4H@N6xYJezV4TIrR##7
z4R4l^ay5CtXxE{<`Aj_L*rA6&JWTsi^`bG+Cq|+t#>b4S<~i+`HSszsmvYirJ}PyU
zfy7OZ6spLZw%EcxC9uNX9zR${49wcijzoBWGnWAmo2-HpI$Mgjc{uDAj#6W^bwzZ{ve&yKby`6xF<-cJKJW
zy)_3(FZ^2@1>V(>`?jUqWtkn{m}Wma5SiDPO5XP9+4ot-Qx
z%it}_a5YT~mEm}cWWn7){z`Hd`FiALR#Zw9kc)D~%v!zPJbj{hp?>T`yzoA}%MylxqDdPTPl(=ZI%sOlEp
zDv&UZ@tyzQTEhC6;q{?rpcMHX4ASOh$iI0!MLa4d{td@Oopi(^Y#)wxlbnZ`6;Goi
zZm9TWZ0Gp;dfgDMU77rj%(ltb+Uo}4dWcw)Kg_KPfk_1w=}fW_=?p5P
z)}%7{0?LZQW)wD~_^RTos~Vzb@nwDEXk17Tu})|&>zD4qR<|Z4=x8S;=z?oAbot$@
zHlk%i8nVD4MB9#!U%gM}P9us|T8F4WwQ)$b%9sFc=Dp5(QN&*@viOR0T
zTr!)?EofBY2r7=KG3Id|l=^yfBbgzquc54RiTWp0{XsSJQXhJYi$KarWvYW3r)y3Q
zD7_b0Z^(R*ti-0idtTy>mkF
zCd~F;4&g$UR5;`Fg7JOVfdt2%EZ7x#E`J)3`E`#F`wI9N
z5DNDcE`F_WLz)Pkj^v2tc0;j=z=v2I@dc7(ITLMmd|cyg7H?(&j|Uhau&VP^ngb{5
z%aRvFu@ki2XsIG|VT9}Tl@MPm%4(|)aq09nWDYJtA`=aL5AkY8&1i%pXG8o2rYv&<
z2Vw&+5P!}V_$rgX16i6E|H%Bmf=D%A6%m$83(u@H`jNVo=`a&AG2AY;}|FY7KBsUs1Bl)ECew#cSx@yq*?l1
z2GdFw$iN@cO{1b42z>Y8Uj=uUdNcF|gge_nYkeMEMFqd{TW=0}x5WP46rqBF^LSKz
z`#KoxBTaB#h7Wz9_z>5(#GQNZ*i-l{he(Bt=nBU-{`9Yu43r!VBL9MF|Jg
zEeGkFrPZz*xEn)pjIY}&zcRJ){WmX3O>KD%sYXXW$@+tROvVu6uEh69;MY8;pY(lI=B1
zrHdR&=B_!uw=ZpovN@|XV^_gjOrTOXb|+778=6E%K(sz(itZIxG|4(BrRi)KbS$DH3?Iv4fgehiG#apr2q_7Rfdg|o)
zwbS)eX9On|LVXtbd*;~d(&Upu{0qPH3Q+MuyoHVQI=}7_6+J9qWjxWlK%lp2%-KkV
zY^8f)T3WlMg}LyoxW0n4$Ta4U-1&&$UY2IX*HFk}7%i)G)>NVYn~W9W%^^00Qc!8t
zxLalX%WO;}iP18DLv13pSLOvG_{!0!W>UiKB4HPsf`g2#I76?wE;#QLkurgt%_QKR
i%yvqleN$DF=J`r%HIq2Twu-7C?4mgYMf`Jf`u_q5#^bL5
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/coco_90metric.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/coco_90metric.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..566b587b2d2858453f3da636be6bf01e6eff6e2d
GIT binary patch
literal 15187
zcmc(GTZ|l6dR|p^^}VO3=fWXpcu{dgiR|^LhoWvPE>{voii+$N#ZcU6cUDSIO`n>c
z9(H#%tE$DBK{aA8wU!oGZkUCg1dtfaYyt;C-V8WSHb9X0Au#g1K#am-k=TAPUyQ&n
z1{Oh_?>nceFGEUkkcaf3PgkAGfBtjMfB(&1XzN=|}#+T%i#={%<
z`oBOSG@-Y(HTA2n>GIv^q}EJb)uq?cs59D`PIfIT>r(ApC%=~O6xIrz;##p&S}S#?
z)~4eA<+W+_H`|rY%-W2sOSfk`b8B;*`L+4Zk+mbTF4JD<99=sqpR?^_oyE09`J8Jn
zb&jtcm(Th3iO$Kjlk&OHKGk_@?I}DLMX7CdPOqKrEUzu=nsajh=>zR;O-zaMCz>cb
zGxt*W^tCfWe_yLkKf(&N>s37}zUK+YbHvqFBdDe$lfRMqt(G4|x%YZO%kA3jDEGc|
zr|)zdPL%$37gZQnc+>Wscb&j%HL9toaIMvLuC!ZD7lTE=(~HcuYYX(tz5DLB+-B49
zIOek7Xti$H8*K+~CFf(iUH6=Z-ERAM&%XQCt()&$c^{8+n9CJTy<_{g(Zlbz1OJf@
zjH^auUU~1zdr^)h0W`RfJfUMmzVeBM&^j&=Fw
zigo$Mie-0&)wz7b+H^fjIDylUV*;F|Z?(FOc3-r*O{?d*J=eF}s~hf)gfr?&0IjYc
z*q}JecbXlLm7KU{-Do?YJIix69nT6}IdCfodj7@L)dtWOHs*1Hn(H-JpB_Sdj<`#F
zID!p>Hr<<>;}&5?22`k=fmIRJn_u)8@|^!Z9h0WxhvziDnO(5M%(uNyDc2@
zxeeEC1IrJ#9PBxddx)akr+8g*z;hfi0>Z5ypX8d|9&b)?t#|t!-#V8_x9_#O0k`FP
zzq8?ZzU6KXsr2zZn%I@`xz0U*{``vd0t-Byua2;4x&3;u1;FPvAsvI{FyzZe6SVe@
z`)w={l5fjx$4A}i^#kzOjfr`=)0c17o>b-
z3Z6th^;Y0;uZH9^fv+MYuq8#c)wMfLykQe_K-Rm^He_yjfMXIxjr=MqR@_#1yY1Z}
zWn5DnNRD*-0X8Vcz{f&^EmT=O$K${WG)B{6;{vPYb2~QsZR_rq)3tWpzIE5`2Hdll
z!wFv@H~|M8?1Zx$Nnf6b-{0u809eA{;K1=A0~`;m`{lcSh(>`u#Yw#fS=ZWmTpZm3
z+_(*Ny6rHr~xP_lFv8`tgjQ1a~pQ!w$l~gJS$rM
z?X%VO_0g0Kx7~(ns|)Aj7St5g1@OMA-nfmD$>eNtS86>)Mv5~hh)J|Iq0+Eo%d@*s
zl%nskKjwvrwE@8mU--4uqEy$-0p72?fZ!$p+Ft2GiF2AgCb6jfWIKF7(s9*
zl_#Y2$E`68Kq!(nR=`s2&6-e(FJ_bpP{{%)?xaQG*k4MHkx8WiTP7b#7HnG2Gj*&5SGl)_`N60)5S&$4Ub2p0=%rg|H~}
z`-ZLsMrZ`7dxm!^G`ePJH1$xco5Bz&7?J6wIVkNs8*1C;Rqdk_1N03`x3k{G0ETpE
ze6ES?LwFe9*Y8~XKnqhCnF&&%d5>IeEitPG}tBjJ?9W8nk%Zhu$1seMQ5W`u?j3$i>a%VR>LZ*;wCc+X&q
zyk}V)MG={_a-v*vdSmBE>t^p=b$S-mbszc~oCyPr)c#dg(jg0J#R8XGP0)L!*P56V
zpa1rHb5nl)^7ZE5$j|9Fycsmy0^#K#n0hWumc6A(3b8t|cmVTM7o)V_hZc--^*W?S
zP_IXYmY-PHaMS4NWflH@l=}^5_btzJJ?|7|@t#5v8Qor#Yhy&uh69+wgi%`Bu%5*}
zr&%nsWBIZV9YCLSJRGz~A)z3Yg2ved$8c5G(84vvE1S+D;q
z?N{Gj?R10H9;{zX=&$Z=+1=({r@Pg+S38}dQvucJZbJk3t8Z;?wi@((uR6i1lf0ng
zs$9>j=hd9G-mX^z;+OIDUqqpmD#n6d!e34|^jR~LDx`9{sh`j@@n6Nv7#Y1}=Jc|@
zU}RDmeecK>>`~&K)sjPn>z<8ui##>CB~hj=U2pF!=4vpy!4J2Z2XX~pzYHnR)COsx
z3*!?l)URsy^g(9lPXa9fKW!WLz@Omu|2zOMZl|_Q)`eNPj;Z_l06e&z+E0hM1AUN3
z|2%lvylr?QOz&rc?0!z955R%Cc4rxL6n4H9<^k*KpcoeS^K`xs48IH61DDqO><*vu
zi`s{fGJ~l>c`zLo_A_BAoWi$!RKxG|QLSl8c-;@QAiG@%3n(E)?i&LJBPo0titUvwSDo5;ZjD3Mr=q2u`>n(o`1GsQEHjisYO>Db=x$VRv+mD-rg&R*SKtt)KLvv*-T!pydt(D?C*#T!3@
z(7^f1kAc#}_kDc*c@#~kgpW!CooZxJ`$xtd69l108QK7j?~!v6f--97saxsm$SPI5
ziN;?QE>WO1+)nSc9}y^L$Y^`dPcZFcWOGT%BLB78XmnNgE@7-kRH2Xf^N6TMnO3ul
zAjUOFM!(^?cZJkYKr$_Y-5vsK-c^pHmO!Ox@3j2tG$b};7^F8Wd#5ZR@xDcvDbz>C&hwG6`+TI|jr1*wIT0WNfr4aOm|bH
zV5I?B@@(
zL>;s$~xz0T1`wz0$
zj2ts7dzt7p8~3V!FX!wMj`u1!L@DWEP&-yFWJX)^Ud*e*aLgDFfT0k3w()(zqNJAb
z#TdXvI=M0&?!yltKT<%#M1V1k8s@Z9rQ7Vb_7JWD>k~s2$z?i+RR~VI<8h`DcRwytl6@J4Rug;Y
z7D8bZVo48W_Y6@=xcZ59&>=S`4!KZJ(Y25<7v=dT*3|GA7!{KS)?*XJ1F%EOzT0u=
zjI8eL>@2UyT^Iov217WFcnr+M;2KY`#<2i1*+M@uZuvjJNC-mp9;2ft%2IAGQWUQ5
zw~$@4ZacfaB(-ItBC){4{7t~(8MHHj11(-qB`FkfS{zW%RSGBB-LXJTlqOC3Qutnn
zO(LB%_#5_y)Ap6_&iH`hz^80=V2#1VP_A@ZR!2oRcuv#xcF_E7HSxxX$0OjAJbCLc;CRYcaz1ctlii}
zXg10SkUkU^MSpc>%z$~Coo2X&b)IIDii~DB^<-75C3d{G3E5@QVL=nd>$3PBiYP62
z2UvkOz1ytzSY1-AAhS-m9|;zs`h=l?@=_x}zGZQg|KLf0|JKVufUDyPp%JEm;F
znS>iz!uzOpUO&bfXtgr`3|KoT_ZBC#Tg;PkE+lnxu-CYQ{u8v^9FQpDdj((rC(v@r
zfrM^AK#5ikwG&*)6ttg72NT+EX-wOdpgS_qaM^uWL16}(C5K>9F3eKnp@hCeZZ?OI
zP#%)r4D-}ZJ3mJYq&J+%Kc>Sc3eZkP{6c>WN?}P*qalzaOJrwN4J@sam`++~X(1q#
z3J_|7yE+BOG6&bP06Z$9bYKLTuqd@8$4_zGye4MuXP^by2YJ{7>ZldW3Jo!shW?sn
z$P9I7_$=m!dhCpPlR6Cb&_>f?SscM{8Y?O%j_jQk=&lq3V#V#^e(3b6;Et>g9&$E?YTydxxe
z17Uh!eIxNc>2N|unp8aS?mor8eDe{!R1ay?0?rrjkA6U#qS?uwyx~_nYEJ5}n
zcCHdrQBgQ>p<6P=Uo|6>ekXMq)D`fL6;z8Vtc?^;7rPM^v9>td@9*VmF_~1$UY}?Z
zJg*-x$Y@I)B#%pJocDPJFE^_osHR39av%1$@}~4GhiXTpgcKWdO}fQv>NX|J3JsH)q#`|%?({L
zrR`*_c9Pbj=|kl7*MQ-3PHid!jbrMj400Oqwdqsk=xIx>r97Ph<=oNfr
zQUsc+6dKYeGX^Q9NCuR_^n}v*r5}%!2&Fjn+`fLG?Hlk?;5ErQ`1E6SrmTZb8>>s>
z_E9EG!x#Jv5+2DDw3>htLxT3fjU@Cz(gc0}1$w~iOIp#Eg^xn4GQb9*Q))zbYV_*V
zJG3F*OY)uAW+b-o_{0?ay8kK4Vd{3q{~tj{Ov_YCN>JBj1Uw{YZ-i|TAT>7)sh@I4
zm`p*J%-H`D{EJ9q!|~y`AiE(qmg0`vY{gqEv&v%*3Y`z8nB}lBvpKdj#u6re0Ao?!
z3|L>6X}!41`uYm;@~y_!P<+eiW_d4@a
z@AYs{RAx2OJ?aXTlHp_4s5DMT258>(T6Go3WAMV0LS0I0O7`nfeh6sq?FlOaPLqd=
zfh`^!gHNM)lRAKPU@OhqbzZU>8aJrO!s@?v1_rx7en2bp~^{dP_mFo-i&QQ}%0z#(qJh$e5@r
zBA9`Ti!cp&5G{l=@BLiPmjzZw(58Q`J(NNr%)hTaFn2ygp9P3X^P?Lu#)(-jEFcS%$o#luG-@
z#T4Hlx@4=E{@i?MavvV(cb?;DmTOT)_UFQ5pIN#+?9bQ2UeeN@79I^tEtq|JM$P{oMXa
z{QI2zCG;F1{X#q=o)u@sIZ+i~Yi0&Vc2?wUX9h=u8epu3^WjlmgcZN`IWqr)W8pCb
zB9C6uFyo&hO!vz#ok80^|G@M-z<6G)K-De=7qBvB?H9LS5XTTYH6En=8^Md;*FiZ;
zJO4~l4%)HS2cVqegA?eV-+w7QaR3Jj5Ke}Ra7y(bAOO!YJdS;QZj%AkQz8DubGMx0ytbvSev#R;rn3C`THd4wB(s0Ggk-`KtsF8vrZ_CsAfFD^8V&opst2@dAK
zLfbb#)jriX4e^3_@snKe+GprT7y8HA;B;s;^}%wuEM7vJ7M^DPE81W=cs*POo=;2e
zdpUf1`;DUr`QrT4d70ji#4FtVxO#r5g3!OjJjh6cp3~knILntM4MOTv->}a@c3*jd#+TZ7sN0k9;#cbU#0WVj
z69vkTdkg|C90$rsDeEzKPxJ?`B3#1DwJX+SUPD?a!wa>_DjLSaEVnBGP!!3Nq
zEE*MxWyP09>Eh*#9M0=fIqg;Bd=jIs6Y6_12vDolpw4~^N=x|~pWq33lsqqDNbL5G
zBpb#Ep^td8G}rcuZ>dx{;qRpY#-nPD^N@R~%QxTHGYcqxrRUN7;->d0=U@y^`XhU@
ziE#k)CZ0hC^!6^?!g6_a^6_{v>Jd%+M;R70
z_#fd?yY?v0fZiSz!GdtRSN`Iv
z?_O5DpWn-0zFGI#q#iq?$F_Q$QZ_1Xha#fByf?d?EFaf&dM>Ya|33gv)n3?)^00pz
zxZ~q?AMwr*97?+@qXbcbm*iAaluH7zk#YIPL`Iyu#Q2`SghIw9W(WFbGAI6-_5=7l
zNo3->3_E~ouA!rhal*we!jMC3q22Ew2rwNVC>{JDK<@c6dyYaBu$fef{BVdOHtLhx
zI!_8wOyEpoOU6@ZKuVl*%;GCdN70c4*SCp=#AOnp+z=rx(hJ@nu-m_3@rNw_EsKB0
z;%6u#y}nnelQpkrEgq%zT0O~NQI0oe
z88wM!Z{f-HaLeH;Z0QC(q1`Cu`uHcBZQSot0Ul`!(>?e0S@05%jApAGKA99beD8Nz
zeF=rCj_*!+zsYLar|P0sR5~SGPZql=AKpN*W#w;?cbM6`362$*9T)c$ba>A~ir#
z0yKS&dVww9VevA@q&lrG{23$&0{m|Qn7XkAuPQ3yP;eU!HzXjtue0|Qd#3~bs}e(D
zxY^L}Ri7D44e*i$?nD6dG~ErXE9mqPF89>o;z}lhER3P3`)&V#-IM=#lK;=7?yw2g
zV`rQ7j^9*if)p4iz8Q)foVqFjwZcqbr0)P5LKZUb!cO%`t|Aw-rFRv^4?~>-f=}Amw)R~sb0U+x7%?`wq6%*qh9wG
zxXV;^-akeWP1kv!Cy7_0{bM#!0eU9eP5y_CM|rOTljJdZMNvvHb^RwB->Xt`g1js<
zYTj@1X`jWvWJFPxR*z=!{YF#bD=DC_z)MkV3(qxAj@8~&IL#pKpK_(sY;TnX9j
z!IV&_a{X$jCxceb
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/efficientdet.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/efficientdet.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a91fdcc5a7a54254f9924a4b8c2d648d5c79107a
GIT binary patch
literal 986
zcma)4zi-qq6t?sGu6LzX+m(eW%SB>F2tg|t=yf;-l*P)$zH3V21ltj`%7D~Lo%jzJ
z%7VmSgyfZpe}Rejk`@joZ29whwx8ekJ^Sf&IwsI=zI@9+xP*L1=dSr+Jb|HK0C2)-
zNh+FBindcam7BV~?Ur8Ur+yWrK^3MUCBnUlK9lE!d)&Vw+!yrHy`<@g2MZF1Kj1bJ
zQ@gh)f
zM>bk!`I}`eh4rM!-`LS|S-)qeBI9;sR8~lquhup)LYv;&<(bJ?qeP?XTxeY@8x^t`
zHUlM0t7IPC62^H11IkbIeE>r)$r9|0Eq(XZ1jtR&5LHgg?Mvl
z_%MvgtP+gbF=JKD+Y;>wV{hB69Cnlk*DCzWDnP(|c6zHV7iGzq+Q$uqn5sRr4-i5C
z92$!qg@j`7SLg9KebDQkK~hob*-(O7>6z|-HPiU5l>?FK_Ha^(b)k(qSNPC&YglYe
mQR;sv+Iz!Mp>8GhIscFxETm|oM}z3cn5gGDp%c3A?$BRoM)_#~
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/efficientdet_head.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/efficientdet_head.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28815ae6b4b7ae120eb4a2e8951e6ed0bbaaa8de
GIT binary patch
literal 7803
zcmc&(%a0sMTF+NiWxcDbA7jsWJh=n2a;=)`Wq<`NGrRV9W@ljBGpt8zs2wYvl@V2)
z?aZulWR~r2vi3k5b}x;@1uj0ck9*;OxO0HN0CCD2LP!`SByMPh7=B+wz53x{wE~f)
zjEd(MUwp6lzAtap>lF>pqrd()KYCl!{*fBXl@lg}U9dWxWuVqe`zL>&37TReM!gFNL+J-m6EAUPHF+uo<;_
zEm<#zYf-z`mi0=w9&PkCqRrlBwAI^+UhBOUUFuzuW7Y6-^m_00=#AbRXxCUhyb`_H
zdo#M)yQ*uvee%`|?YkOlu;w$3HF@jEJkom(Gwy1g)=QjB>lnH9Nsy#@;q%EjE68{;n?Kyt#IT&%(otUBA|WT#P`A=9=fTR#J*32nmS5-TzLx&0$;ptCZoJgfI?^i-eg#I$EtuRLoJtE^sy
zSLNC&YdyEv8f&AkcB*fkZJ@8t8@zg1<4v~t++bVmHT1RE8+=Xnsm($pOkD?(24!%EJmw%KX1)r7n&>Pi_Mu?FlW`&8ommG&`$SWsGA255V%G^rNdg|>
zXr~b!4I~iBy#R6`@szV?YiPIgi!g(fL>)^_mvB!dsZqgEH71M?MH7q5{PXtBlt@j_U!AJEC}Z#$zg(b@|;*CmXPVXlHDt}`@fu9B3O
zCUiMBZtV{LVB;^o`wxfDzCHZUpZxpZ-RXb!?T(pSDcK}D8bwf$NtBy>oa}b&yt*Ru
zd5Ic93|q4zx_SMS4CeJyB9_~8(aG)kaq^Ju~;U7<;90jJTXjQ*UPRl1+M!O?Wgy5qBz}w
z+6cf&b`GJt!zVo6n|M1>#CXbmV$}}rk$qH?omGRg!{>Gh6HhfBxIo#_t^@hOxVtBW;wla%89_xdl<_Lom1^Z&K+PEGUP&LJI0SQSKWkhFYfGe|xGgu!a+bQxQGj
zgxuEXRDWtbvPeKrK`COfgyPJhTp}n~2P*6vOgqMXds&;W1wJ)WzM1_`1g-6uf;>1W
z)l%T$7Z}}d=K6j|7hk854i$vwPb)Xbboira{N9IXCv=jcad`6;k|R}BQKx%D+Q8&J
zROTR7svRpg;yAC|8L>&Ie3ycN_;aat9Yg#QcFOhO97ce9uHqf2Jc^$g_SS_y7;(4P9~*l6N{@ZqL0PDgN`*k{0AOW%89^|2ldS
zVvA-}^orinH(*r_{8PDMSbD{3o2oQ){NrcnTl(SK|FfJfWRgrQ>FE1-lYd6>^hT!b
zYcQ$rLfv6h_l2{2IE%CW^elD0l&TxfHeBBA+!cDo}Z2Jc0+l7?rY`TV%HooxycQD3!XoMUkBN
z7QrNoD{fHnTPR>V$wKCOl-o3id`4w17dA7mNh>%*{R_-a$S=^0n%=-~OP~MSvb`b8
zS4qbzD40x(40&$L(9I2))Jt_qlT_0~=#unE$)N*~++^luZEB$gFX%G7B(w_fhIDXM
zhqt8uwG$)5wL@1n_@M9NsFJ9^0U+W%6rG~dmfxX{PpFW3QzH7hqj7HZ
zhq(z{){wj8Eyq}y)KHNAGz=RaNyRwqT&x`ztoQ#u$~r%6AFPcHc|=J$IN=)FYEKN#0a_K{P}zCiRm1^7%<_FhtnH70XGJ>Dj_d5sqZ}
zf1tv2W;{bgLvG@<2=*3_$TH(jcxWVVr{*7$(+xe}x4=@k;)sMO^0AK1FN~=@Eu*iP
z*;!deLwl!^MYtmh}
zv-V}_vO;kF823dbTgzHU#&kWcrnUWg)q>mx1fXbTeJgHnL4tJtncKdH)L2_P!VTlaLZD8e+t^s5&ma!#;kbh$q2?_Yl~Pd(oDQtyEz
z63s_a;~WMny3ut$PMt|I@xt%`$t}u(;V2It(4l$q-~kdM49(E%Bjfep!8uS~nu?%5
zL8jyk(Il=5g_|KNvmjvK9Vj7ogAs^SQU{qA$wQeT-FA3)*xf$2mmGAG;E-=iW>e;7
z03jO*tVsQf5Lh`WlEYHLzk+7x@-uT0p#NJlYC0FdlvKQsdUNueI}8Jtpcr221c{?C
zV-A}LI+V;*QH6+4Nhs7$pNPjm
zN*qwaOX@Jin1B$cD_mUo!*uDGtv&i`4z~=}cejh+D
zJ_6Zvd4z^K0@D`pzCME^etXFT(=IvA9#vyq!2kMpLCsg}9dyh72|?0Z4uCi2*V@=_lNF%oj8yh>j)W;m)w
zihLJ`_aZ|{d81S6NUTxOLXj7sGyYy)RNp_~?1Uq@wnb!GqcI%tAmx#0)5P^v!p|JZ
z+#*TpY$?}Zaq)_K1pfD^Alj=GBRGy#hdh~~s?bz1W%DsIpUfSE-f-4I2A_h(8UA@y
znH@FSmJN62+N-`bX`5%$SG!hc36IcOTaa-d4CC
zNU1t)eMThm+A6vJ4BIB;&}vs2h)pYqPFwJCR0hz{ZKGkf(bq;)V(CT0&>f5|$JM;r
zD`VHyt;ng{qU>nwMcj4sit9!)w}X1kbsxdJ&qhkF%SOKIiZKzzL^viW91wLX-l5_<
zRQxs-l$cSjp>zmw{gdR-*EsQg8X}by4Jw*c{18Q6pQTuR4idUc`amPE(Ig7#1!>?q
zUTID2hE<}oRzQh%wee1)4LVim-MA!YcD@EdJs86VsvRXBwv^y*9IJ%KKA`oQQRF}F
z`f=Rti@<-F$ZUg5x8;opIr3ouBMG|jPz9EBzI;Tg5{U+Aiwa^Yclw9A!KZaL(hQ_9D=IjJ0ISGg*;lq(hGd))&p7Q3Y6
z#7Smr8vXnF_3PKKzxQUZ*6X&0XZR04>;LR+P5T!r%$_1Le}FgsCnQW`dZ2}RR}YP@
zp;Ori3ZdCG^L(*eL|q{$h2?Hp)|tp#T}$SRK_#@iw#=7;Vp#1~WxgEL!g{wJHo6U2
zwt|(g*=@>vC0Gqx-ImPT!CJW9T@O!mPlPACC&O2|uY{+%r(|0-I32#)eKkDOJ%e(M
z)q}I)Yu(p$jkoq+f2Mt?u?Aattg#i|+$-$q-E+*iskNKWu_~=?Wah_yoMgpYlX1W|
z+j>^K`OuHIkZJZNe!$#lBtkdvcDO*@TF>`l*N^2)`
zJSgzu{IA5ztjw&Z)_1gl&MM4)T<=<_sXoToVQ^ZsDq_kA8E*LZT1kFRdhKDpN?JubX|H8gFpothld
z04L^SPk0G;u5Na0+rbk}Lbo4yam-`qydNd)i_YdG1QlmAaG)N1I1)bE_@jO>VSY4p
zk}dA^y#BqR7)>H3iRAruI*uQ?{Vgwwu#?A-o3;3nq~b^8N#YE+mrMkALT}uW)TrVm
zRJMG^c;w8c={Qig{yomzK6GPgR!I*F5q!)OKk-LVEZ2pejyx*;!O%I+{64K)DX>E&
zXD|}7gBSR_9*vWf9h}kFd1EjE)A9h}=CN+93Iqz4I|N+tESj(Vs6%b(eG!;nD6b2;>&k5HpZ0
zjyp*%U(pscs^cU)#zrj;Kn40MwvHbotW5Gvn&!@LTJD`{9pwAHO^NkMIB6k8boneYai6%!KTd6^=q2lS!Br
z_&DBdTUm8M=d&^u{0MeuL3OkGAsx)>hg2-HW~!4}v*lzZ!J)-b-^y)L1;1syRlM=n
zk)%edxmsF8s;4ESMpD?*w#|W->S-Z0(=yYanyK|S#`ko(DR9h<&9*7dV61qJlGiEu
z6(sGVI7|6+lsHJTVmujhky);?_HMh9nK2IrYP9TpKw>^j63a^W9(rOJ!^~D(WeQyP
z``XX@7s4pHfMX*-C%&+YvpamqqpgW|Aq*K$c%QcF0`8P;WaA5q2Im5wStoQnRB%ai
z+$}Owo$-!%6U=`FZ`?wn*(II+EZvZQC4KkJMXF0?t}|b(F5bZCK3x{{kW$b#&~N+`
zB#E}KKhwTTBA?|7kNzYvzpHH*_w;Mpo$FIQ)tP?J5MM`KDWaD0)Ie(NTS+CQT$vfH
z@TBkzn}br}xcd9fZ8?{`PjKfvwhrdl)1T?!z$Ol4d+FHrYEl9965bl#`Z|CM&5DtY
zgBDg^s9#B%d$hJyW
z&BlVC8Xd&^E{FwUC4I>1xPK3dY8q)r=Vmo4;Xv_-iFMG*
z0CR_KFH(c6Gg%w!27W1BH%#3&TLqOGI)3qI=qL5v
z^IukH=lV&em(K2cc;iP%9yzJDt-;iugc;CbRtu@VZEhEt{tW)vee1UBQF>vIw_#dg
zg7@^Pk&oB5EuxSbL}6Z6IiSi4FiYm{xdnsxko&`}op7=dVE+K&dBQurhVP;Vu0&&V_%?(#p
zlSU}#>ofF@8%Q*x2Eo&7VUu3I0gD7+r%bY;nI*j`*JttosQZ!BhaO^*Hb`#HZzDrn
zlNL^EQxiFWk<$PxC=~%pbO>ArXi>k~zLCP>(`^aS>YG@gloKhI_nYsPY4MRG>v7Id-2pT
zi%^1cTG%h9WdcO^*KvNoJjK!i#cxZBWl7OW4N$Zs#R2M#v@oFP2NcaZuW1b*@~+3!yy7D
zi5F+5k9KnXA^Z`pHlEHlJty&!>mfJ(-;rTDGakd$Aj~!`VUtUHIw`=manFd~ND6;J
z2pA`S+r-8K7sEv%m&ANh_{^AE(+cWJsg+iwt5RgeC&j6q+MgAt)oE>7pEjl|tR!c!
zh>D!02}N6d^j1>FY}TGqwUg6Ux+<}=m9|bx{0Q~=D;Re>T}_*N#&j*ICbjK)x`MO;
z4$4n}G`Cln#VSwq{pK@mx{|Ddn-)~oNY~P8T1)F`BVA$klL~lUpPoq8()IKNt3Dy+
zsd+zzv3t)8`jdbnAB2R-qecuP=FAzXP{a&)BF+InxSybB4VO&w2Gck
z@d}(L<*rhRp1BUeWtZNq3b@@tjNvLga;1C_yAMl+k-
zojkPPxPmaVps>;VLX3o*DXY?lhnz=MNDz+VKr+&olefD?EUi+~M3NP8a{60YNqwpS
z@CinMyd?xe!?6pzp72n#sN>qA;B$bPnWRtclL{uxuU|o0B5qMavR4s6>^ny7@;GNz
zv8kN8l?Mn+GItPo!@PpbK6z_7|5;TTAJy8D1vdx!s%}+U=)C)4)nYGkhuB%2i*YWi
z7C<$9zgb{Pq3spe#KqdoCNbb63m61bDo;}{sYF&=6xa7KZG0JtcD4cM(1v^11m01h
zIYYOMMxlke7JL*_FByjJpzWYt^{ah>@3J@{klP<&Ht{JXT_jma2{5Z4l3i9?6kA@(
zB_@TXkPty%ddQ6+R3wqmKO&Qw`#AXkuz<074*tzP@R9mb^h~w`VZgD(5ksVlz%d}J
zg>OhEkWgvgl;dsWKqPQ1azj61w{$<1=JV!Eubr!
z)u5Ooxw@#efpS0{vs2WMIH&@JRV6UCJRd&J+(aJ(awuevn}jlY!JhvH>bd^QKs`(I|KGu#+?XxRFZh59j_;yNFiQcy1z0uT1-cOFnIXSqlz6j)
z9d9`AEWtf>SQcjeSYjW#D&!e
zk|PCWcz+WW?P^xWj?lugGT=lAj4duw&p)Sxph@nF%g>ux6=wn;MCCo3HR=9joGO@%
z1Q8^X{m7gP7fHLOZtlZ(h4>b=lW#7jlzf{Kx;f>nnXPogWqv4$EZuzK9(w)=Z~PV#
z?HoluDL!f<_+%ptS3?OQD7|F1;O1MXZ|N@&M=f4#Iq;w`#w+xRF7iU|x|!{|q5PbH
ze9d+5!sWC#SVF5x5ENbyZ-
zk{d-KMX^H3E+w?ute$`D>vNz5_dp;3pTA0-+LW9?BEK=vtX9J;)3t76^{r~-s|_2F
zK)&mz-nvY|5L1^U+77VU&KS3jnqD$xNzm4bA{AsGfM9ne4Eql{{V3}6gx|jx%Lupp
z=#w=Vi}1MNlUtKst$5o+`<0}AW%yY
z^th8lB`OG;Xkixk_Z?^*g8X7fP{;)Tfg&p>iH~sYf#45>NFcP^Iy8&wEFr2yo<&u$
xPV`n3c`ow(mNGgJxXO{ZGwYJlh|9EV(q(BP5Nt^egccf2nL7*3tQDI2e*q&e3^o7&
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/final.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/final.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2459ada7e5a5ab0ac77550310703e302c1dc83de
GIT binary patch
literal 6880
zcmb_hO>o=B6$U_nq`)6hKem2sC$yc|X5vtZ^_M7)e`Tj>ZB>n&q-Em55V4dX10cNs
zY$tqaI`;t{;>e}d5sy4>*JL8We(EF_t=q09a
zrMBeSAxl}3Dua(P0;wWL>NVRB7AmReh*3A4dfPDrW;9ICusy@{EP*!LK~yrlrm@0J
z3nh0=$B6nrHP>y%jkaeuKr$dqzhwICdedq<47K$2AJ1R8wgfD9z3l|{<)+uLS5VcK
z?%Mw9W0^Eiqv7k!ZSs3p8V$Q{GcOR0pqig9Hk-~FYsqxkqG?&Sw-S}+n|Hm-tET5M
z=aSQ`-;M_7Su0rm$o3h;uM65YG&D*2=lQx4V-n39y_fO&uK)y6C~e7~D4WV=Dv)=S
zP!3X|vLzpq9?6?(poW;+P~A$wQp&~`dKr)H*@103_6Fl+jJLeRS|&G_9nmjZ%kQ%l
z&V0YgCoVgt@7oO+M|{aygK_57*<5)w2wMK!#KdjJO}}b4CpK0m5`z7Sa`{7(+a{Qo
zFdZ1Okgx)k#Z%FE3P_~7X0RHm{v94fTEl6Y0arnjbUc$$hDw^o
zlpk2XllUl@zx~OC>je`n7I5Y?e5hi2D|eZ<+BPR#*J1&y2X@n&Xz}J6P=BI3L-Q=y
zUuoE$=~P?yqJi~Y?F~Ox-zcCbKMf$I^YWm=$AQ(U+IhroL&g6g0F!F6C1D@QwUkw0
zD$~X!rm^(yFJoy|`Zw8asUce#D|^3C!@jWe`%>;tK$Z%e9F%VFM%SdI48#f7E7
zTw&%E%qC13Po+_h*N{I~ruF}^?FBbaV-j2KplsQ$nBc|9)8)k}ywlweVZp?g_~>*Q
zq5M$;I}X4Vu|gZqs7#ob;FpUlN?G>{*1|#CIq{
z%fq4N+;icO7M+K>x{0;(UdO4$_Q7(2hZakzstfh6-uULV%ZLB**O~G23zck?LsaN&
zohYB&4w2rlxgQvT?XqaZY_*(w#_3W;rppstlfBxryfwU#<$d4ph3WWdOQvA_C{ZbC)P%^Yo0ZfM}
zz)T2#Ze&9ZurEvl>R|>j7fNfHuVp?OA@iRvn}Nj={nS@0Uv&Dp9h{T5#JsRN^2uln*8HKk~{iDee8+)_!IR*
z2|W%c@FNNQg#`X$P=>A1(>|*m?MXe>lX|=-^`)NFmwQrA^rW8bNj-(}R)Sa7UbWJI
zr>(w+iltjQVXN0LU)4ug1;)g4xL)saz408bGhMF9=WtDRxu&~Z+EaSXbh&1q!}VsD
z>#gT-&2_on?s7eCiL+gKpQyvy&cAXmd^OPM>npqDhRUw~8X`vO|Yq1-^3Y^_(+wdDAj8k;E!g-VPBj}UAK;T6JWdfudKStm<
z0m@CH{?@c%A*<~8H4vtrBuwrnD4rMTOHdKnDpEq8CplTj37ST02H1?iW}_j9a@aa^
z4EQ}S&Zj_}ewvv5U^mu8Iegs-Jg7`3D^tnJJIWG99mg2;8e`ODq*H5bq8N|u7vph1F&_63^(`V#c9BlVRDC4O|HA^qO71g^I-Ld#L*(3ijHJB*@G4&a
z?*Njn%SAbz{3%&kk+ZUjU%n@%cBxspsPyDfO0xK+@{oaFO2IeEuEg&FQ&f3W)*XwW=gQBNCA{~-8k(2Dun-6m$AJK*-U?TU}DTL`q@~||5Gb$t?XW1#@yMEw>upp!r
zdPpN1cBJ(sqz#1mgfvLn8HBVUNb3*#g|z-2(uNb#vI%Ji!eT-i^?4A|Mj&k<91zk5
zdPo~hNXsOojfJIzG}22#p7vXCk9Tc+D7GJz%LOvOW+*>Hwk<~;70^#KXZe?&k0;3
zFi*fFaErio0#^vs2`m#>AaE6+a!7a?PIoYo(wf9Og?CznEnKl^tyv@`C??}#DlVo`
zh%ra$yUbo$#XXG(>|%VtDZGkt0jB^f#uKF>1*a;ZJ}Oh9LVBYzEh@L-%8aOxrf8WJ
z72*C8J`hK*yC%2(2p*rll7tV`75w11^Qj_2!zkSKiD#7vL*U$P9hXO+gB1}5_IcOc
z&MVs08&_}e!|1(|r}WK3CP3+%VIb+lH6if4Vccn(&TdP_KxR@$xTJ^>rEp}9)MeZQ
z@=a=vrrv(}}QTfr`u<#4e&OId(!sSjm>uG)|1-H!Kf+Cdt_}Ck~qV
z?n4P3Jw7e1?wwSQrEnt!UyL%Z3Z=Tqc1L;BPfg0il?=~*_EZI{m<$?4-exN;o{D+xeD|b{_LltJY
zakY}!8}KuebaYPBgF}N-eLa$mN;;9sA4f|<#xbgPcfhy$SvnXX&bQ7Tf$l!(oz89?
z6I0PmL8@nQk8C>rIUpT&X!bhEqC|~v6L^!rTL6*9aZSo1ZP{+Lyow$>hhI|j-ibqe
zB9tpr!%+f4IkMyK;{48mh{a19;6DM$7ZaSPL{XK8vm3?zgXm^4G)9{H%Xod-c61xE
zhW{tNQpO}?Q9bf0q3lDj?&ed%(>;+N265$fXdXM_!j`06V>?p+u_e`_5%%kktnByFb6VG`bo;xoO$B92+7JgpO
J!`b;I`M=)Ax)%Td
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/final_mmcv.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/final_mmcv.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..374323e5134e7c7c5afabbdf59d95ca1278799c3
GIT binary patch
literal 6894
zcmb_h&2!tv6$e0ogg}a-K5Tv1PG~!^&BT!s>nl;5_)B)0)>buglED%=F{;79<7PwA1MX9K3z|_TlZ@
zx4UoOK3?qaHzYitgnz01c~+ACMwQN^0XdI1_!EFF*|IBna#fZIS6rp4h%)1;RaMY3
zuI6Q{Sua=3d3seBG}Z0%jH)4U&CPp-YC+&xx8EyPivrKN174|G61d_HRtIg}?sJE{
z;p(tAQXTOQR1bIus|V3OXy@(1eYraNo@Dpi#ru+7Wa`G)cBVSc3cC}JB+v)!66hsn
zY-hIR>LFWMkt#!vF#@R~$Ldul2$w3E_=s7v+*-@ELT1)2-*kM_@@;`OT47u={f4>5
zEgL2GEZ2+&KDRa<#?6-R)Il;K%(!R;>{`QaxeT@JwVy6tzPbV|f34+)&ZUN5ch*qV
zR_-{#`eT_iP~(wn%xmzwm+N(><}g1Lji6eeFE<+Q8GFU@*s^8Yj=vU{78`f`OY4^J
zGxws~sNIT(7Fjb~|JVr_#BU1Pw=^_K`scZtl3)_e8@-qC2Co2wQY3B5pDA0)Rwk5p
zl}HXVk+Llxk{-!hYN$q-+eqEcz*5S+FO4!D$9F=P+2?`jc0(ws%M4ku^QawVXW2Nh81!Z^okZ|L*_$=SY5LmKh7pI8Rw{^
zY0Lzn{dSuJ!L{#27UHh>0G-5FYB
z;r>e9@v$O2ueLeayc-X0wufK_$r#LjbQfd+q-;SRQuqY08dW=w*m9@@KL%h@RkkH;
zCb^oi`Y6_O4U3juZSh-
zTv=XP39U6|&A@iTp2>WgwPYa$3uW2_pE`bc<1}Wn*$T_HmSA7k?yBdvv*GV%z{G+PgeFU
zLcZdA6rtte(sJ&(a7c^JLtWj#+Ihd@*%JF;xxh_}r8H>>^{-t2_O(lg|MAzkiE~R8
zJw{5anV
znCY>APKG$gnt|gs;J-M-{_tyh+fh(SFFC>Bmw=(PE$zyWBv}fD6W>;Xu~7X=j^s!(
zwMYTXMj60d1gCE5kp|coWdV&S2bhnfjoddf9}5e>G+Q3W(XfH`4IOWvtvsaf`rECU
zo~^}ptJ<@5AS~?;K3jS?ME-1Bc|^JncWS^#p4;bvXG_Gl2gB0FsGTMM_>km|Jy9Qj
zqCW9N{ZLAe!zuho3V$JmzZjNbYxK0wYDargkM*P;?@4{BC-vo?)Du0aCwo#)VZ4>_
zm5o>JEZ}Lo@1bHFc3#-(HO$xKBdh{*>N#Amce&np4%eA3*YtC^X1ZLnT`uh@z2>@H
z^UvXWv&;3?bGR0|TyJ-|p0>oD8{(&PW&sVK3L
z?053<{3!P4<9~zx_+JEQyFR91EY7u^8u$wo9Z;-U3n^Y9pGUEw9o}uS>%jUsyWMi&
zH$>Q3xe#kXtI2q*n}|)}2F=)T0>=+Rik4!Pd|q6<5Cn|V>2jI#2Ioi6Cx3y!iv-F9
zNH>0rz;OZ;o5TanS<^;n+3{;2%sfe$-cL|`KQ>mNB0^Tgg}gv=bjS%CW^)eMoWSPe
zVTf|sCUZ^rJwFMmK%9M=sDIc#YvMeB{t8rPxt9Ij`bkXH4DtTrz@g
zeLh|1MKJ*K&D3|S6Gj~;81IdxVhp{0lI5XT98fK}%5JiVHlaQ$Bkfsw76&(_q&)`EU
zIwZ&<5)~a1`Xv$-9nyS4qC${(o>od98AuZ+6iz`kK-By{EHJF(KGUqxmB2Jb;LXQL
z+8Y3`;tl=|AQ^^Sl(XrdqRWb`%PM|_o|xLD>T*%($)l8H@yiq-1HX)dZd170-*$4y2xmv?pb$s>*P?F0B%xft3hEgiDS-Kh=0xwOo(4YieNCUnY$)9ED
zs$%0J^Bq1#twfjT$h-3cI*G0z*8pt40}Mh#ht{!Pn;(@wK*<+tRoCwlu*_(M1ki
zMr4HgMqgyuGU5z6J&jT6khF!EWS8DTlo#=cHX;ENvBz$Ilzk+RNTax@LIPrz-6Fml
z0Nx+<3u*m5q>&9f()v=;2BShs8YJxwLE12+4MYP%+CUF!BPnTmO4@;_n36_)9)z?}
zNE?g>g|xvQ(#BHKaw%!!Q7I*j^pX{SMp{T^k=tYz*3Av>!oF!_lzlX}CvEFQj_f
z)4N30gndQyw)QY$z;z{kX(V}@n~E~9`7Ofm6YUc%I>d5V<~A9(NHuJ
z9gN1JiMoc}r!8@jw~^xv?#P@j?wm3zmD9-)yFi4u3Ct6iBY-rT#6KjkLSPvHiHMe$
z9Ki1pZJoe50v{9jh`^5soG0)Jfu9k0pTGqIpAx7Npqv4}NZ>4icL>}d@EL)h5TO0c
zO#(kBaFxI!0gJ#*0@ny!CQu`=N??h=6@bbi;bk}_Vq&E^jdup`tT?uiXVIGT2ue^)
zC&f%s%%TuujobA>LdF(k@
z5y!wj@7nIXqFuRusOX4Mr=@y2%+p63-)4)I1dUBlRz=qEM@4j7s1AQCn3OJLPL#qBRy2+@ZtcIg7W}$
zP2P~BK3je$%aXVXD`93s-H~?W9c3r8qwZ++tgYCY`+BHF^0vH){OGSK%+HE2UtNMZ
zA3#!#c0WBdH53*N;|(SO*1&z^#FiXN4>Awb2r0Z<$n@E=uh_~1>4E$}djRYVwi2D~o)t68
zzXmJz{Zb{*w}A3r5cqBTMi;
zLq|BZ!n6Za97Z<$ZWAe|q_btvVqM%mCVgMbWwpOkour*4lItRq>)hxafJzUqcKz
zNw-s%)1a#$A~`Uu~Y{{I=trlldIgR-3r9@fs@WvL$HZ_
zd2+KLS0gT>;u0p(<8T(jH;Q#14``v3p{
literal 0
HcmV?d00001
diff --git a/projects/EfficientDet/efficientdet/__pycache__/final_syncbn.cpython-37.pyc b/projects/EfficientDet/efficientdet/__pycache__/final_syncbn.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4390e440cc1039aef4e9b7b17f92d67c18a6a776
GIT binary patch
literal 6676
zcmb_hOLN@D5eBeWEcVIeLwrfL*DpzHOQbB4qAkmkCDC%ZBGHT~$IjXc1TL@?XaKt&
zU?r0QV<%KXU6gVzIizx+a!8dA$uXx?-Yt~0$7qIyOJl@WSMBiRceYTQ=VE=
z1ux~MJ*}n*T6Hs?Ueg7gcC%itmhYx%BcqC%E%iCS@ug
zxnX-vw*E$=;nW@54@D!m<|j+drhC>}Ha&aEv@FM8<>iIuJ^$(*)Aw!nircJz#D^B_
zR(R(p9Ga`PIh&HC
zT&43kzEO`&56)M}gC974c<(Ua7-~_d*BigON^jfa{e_GP>BYg>)QJ4ZYYc*XnwDlkGita;Q-NM{?3rd~-XTfwqoDfrKDksdpc9Sbt7FT~b^7}Xcx^e%~
z>c77D=U;BDEM2OmxEdmeaNTQqh!t&*r|ed+div_IfBfy-LM4*J)Rq#Ahw4w{NRA{U9VtL9N&z#G
z3e=-CFdJ#WT$BOkBOO?Xq_xcFGJ7E`f=XNRgoF}OYiM1|Mj1lz^pgqtsRUgKpN99*(;g2#a{&L@1NhG!z<>S#{_z9&Cl27BJb+)t_+JQLTsviH
zz|&Utp%iArm=IA*h=O+6{z_CgUnaRLXTMidQ#56`B2U+d-f)
zR+(v4ukduxZrP0Mh9QEi!E;XF_(5p;b(^acQF-Zd5ZH|Nt~VHKGB!lU3=@nH93c<}
zjuAyxvIzppdiX$V&ajX#b#n(WX7@5?b~6;;=ecFriP(w&%_wK(I`o81qje6{IYG_y
zVW@KLyS8f}TKYEjIt0OC2!4gBzuVc)cph(e_cm1Kl9kzHWu{*#bxn@9L|*ESsaHQ2
zkDyndPu5vM41gj!_04snsN7HfB-LWzEDaD!ME*
zpT&z*bXm|vEGoJz^h+!%x~%z_MTKB7+Mig49F!LO8um^#u-(Qton@HQS;MHiW)K+0
z7t)vSPkVki-LgXru@OvfAlF^JXZv^B=CtQoaFduN(=FCq0~t*BGX4cS++Arn$T5vz
z-LJ3sQ>}G&0!BW8Uw|`@B;|8*N!E7%DY~r4x~z)7+<~;3Xwl@7Qb^Qyy`_|8@s}#d
z;xDB@Qt4^`)@enR$K@RK@4*B48^ir$##A?g)F~#>LnQ^NtM~;JDzLptK{-MS63LI`
zO&|iEyjWG@@KJ*Z=nqhE`4e^vWeh(cPNv@URvh2{11f;3%YM
zf(Z-Cu!SsO3z7U`YE#)vZ6e8mEh=n5y0W80X;4UGc2bc73JK4S8l^xXt=UNnxvXEp
z+v&~BroNfoObcmBd@_*AMY&K%`jfL{BtW#8=feAv~NO=ug83JGn{v@-;4!_YPm4G3)m`)C_U
zXwwtgjzpz|HtKT}+Qy)5Fd7uv2KUi6p3s&_Xq$-232kJTgcS0(ke=-M_-O27@Q$JV
z3G_4^4U3+J_vz`$L{ELY%j8Y?S0t4?PentCUTLHiXnq=+k3>g=<|F%PekP&0@88lc
z&Cf<73C-}#&U4WGJT#9*V?y)TKAMjwH23F0VVC9;(P%<5`ST<+SD|?#nh=^N_R;)8
zLbI0e`-{rULkmw;3mNpg7XCL5qyt;mOOiv;QIvc6D$y10922PD8mfUTxreVH;dn#
z*ee|$#XQm>6fyZD3b)?Q~u(QMTwK=P-~h8U}Jq+`NG<7{PXIaRjB>yNm1I
z#O3$9i4!+)1+1$9t}Tm=Ni474)xgbbtc0%jyHj8TXkcZ6LBL`)&vSHxhE#mj4*Ka3
z&x6ClILD2b+&&pN*SmKnY!ce)(3QZNOncTtS(e1HUkOud>bA5k
zZ!6oWZFM`ntu=6@w^H}@FdfNT@`AJ_|D292nmDqkxP)athORn=5_;%kQ_@R^gy0Nd
z71Fm(ZpxwbAoTz`Ea@YKOtY4t2xtsicH`Oj
zGxoBZsSCLAHQnGMh^{=kI!+oXLu8*3Oc6{2c$y&rwRw8QX|()mK8`z|Qgi;n~jA|7K$n>w|15(>TP9f01%kRUXZ54D1+F!5nLZu~ro(!QCFCl40&Q|Ie(TylY)BW4^#I>^<#Ds$bJx)n9!zciQbHf$~*s29H(<
z`4t=UQUm34sHz0P38yK^XrEHloz&?&_U&e@&upKYI+@q^tjWBi}E{v4i>=*&?93Cj>@8
z?mFrdoU7TfS3=)9IiUwGwEWU7on7!->8=r5tp;re+E8Jpd%#Kt*1GXEO2}4L6nY#y
zyb+)!on|wDa$AnnO{-xGEebYgUbq=N84KH$4A4iN#k9)gmnNnnmU)tog4t*>4D>73
zqpO0=VOqrcdQi+RAguP&V=2_QNO_Rt)dvzW5POLd!El4`{tCvKX*|G#J_H}c4_*(1|1UZa
zGFSlVQ|s(tQ)zh)hMCagf+MEvL}8&=#Z9%ok`;TXAeJK7$ctQTTn{!f5$790obzA<
z0JG6K1JP=98B6%hsN$Gcg~V(W0&01H1rZsB+x0?LVXm>T%dDLGv_w)KFuZIi_;#Qo
zOQ`^aCeI0S5e0@RNdz1M(glVg5#zu}A?<~p@ziu8q^UzBK!X++T=jls^yMuB`2m%I(4165fRvL^N6*}t@aOJ}}K=r04y_?0tEGk!XXhVcNx
z&|F&B92HN6lnED=y*_*%3q~Wx=GRcw9T24ltn1HM*pq9em$Q=W(1RLCN4ww!VACPk
zzr)H}$+&ammi|x9kHF6d^|Ed`H3JuZQOLbm@>A5bAjz}Lig0;NcEzdxm0|H4b5K>x
zcMjoaQ56ge8Hji=UYs94fdx-JO8{qp+yk^PVT?1ER}yR{`B`ee4HpxSfWz&TK{UlW
zNK`OYz{3z^aW#BybIc$X^#8K;c?A4@STE)UvjCqR;M~S@AlbMtWHL(NAK?^f%$CoR
z+%hiF5A$+5GgaPNY;!I>;hKL+Q8*s5o?%Eb@+x3hwn2pMsc3r-J3NUAU>TK`_3fmv
zA>YBFWtIxCiL2xW2;=VpmPg92H4V!i+uOzWRm@`gvVm)A02KuN$IzoNr;sZxXBC3i
zfrr84fZnA0Z!AN3wgOJOj-Ro|M=f-Fy`%5^Gn6s9AIgcsmH@7V{RQacxpDH`9-_Z$lXyppQ39uXCXs0S
zSF27iOQfoUZWXFC)k&%({vOH?QJ^2qg=!(>^~wkE8)|)Fk8#w7n`hrKHF)
zPA#dTPHX5Ty^Av%x`K}ysgfx$YCYYsN|q>V^^r!Vlo4gUo^9kxIZ-z1qm8lBSYy02
zF5Xl1d}E?C(U>ev;(f+VyVf(gH1&?;j<}g;lAH0=+H`GZOD)X;mvwW%%YF=owRt?4A!rPUKU-rFbc-spPzC3gq
zezR(?`jvKbq5YJPuRF|lnq|)pz&NVPp~JuckmQV?+T|sFH@V>T05_yE@on@QM46{b23gM!W7cFf50Ln{b>6YPbc*kD~w%OS^I;
z@5z(WrV>fS^F3LXA_c_cm7>b^u*J%2JQeuWM$32kc;KvK!CTB;b;=JPJIqBRwH!X-
zvd<0X1&|rR${0+d6L7l4s3lD>p0JXWEqUYEkg+aw+a_bC@e-57n9rjSNj1p99L6hT
zM1gFmksPV65-GUVS!q(5!96Kq4(bYn3Xon9z(7Z7^sX(IVzurCYzBBXOO+a%$Agau
znT1%iJ0Uwpm~?mQwtskDW{(Bbt~)V{CU$(-#CnZauu7(FH=TxO+uXA4hS-QGXKedX
z$EkN~G6Ah#x#5LtEtj1@7wi-PlG8pw9A?u1R0xP$rkpVZri_>5qFr|bcR=lW!W4@V
z^dRjijZfb78ZGuT5o$4wWF?3KNsw3xQocReiDg9*eRADtx9d-#AhZBmh|5n7kCkTf
zGVXxrNnqBnPJ3dof_0(BjJ5!Km4M*HFeaQq?J3-XuqiWoFXK)wQWJCqD**c}z%Ei;
zbw|cib9L8nQ#;sW70sicPCu~8{M6W%DzZ@K*^&{f@=|IW`>AAt0a>2E?X|K0{eam&)p}+3kb6gi&pHy)OHOiHnE7c!P{_357*?4xn`rn`Z*FW7|
zy?4E+vez*{ZZ=x9g*y$ddhGyGGXfo3hDK_7^?+v}qx)}W8U5JhN|l?WZ$&BPl+gkx
zY|F62nq{GDK$)H+xK-T22LPcI%22(^uCfQ?hixQ1G}(M4V~f$&l1WSeRctaT(p_yQ
z9Z9w(>agXAXOS9ds7dXZsL@^8dgjUt>VTaDxP#HB+VSMMQDv=8JK)-??*wd-+AI-R
zCO{kY1E}iNX(RbfKeTN|ees-2YXA-C*~Ac@UWI8B#iGHA
zMC}rR%LEQ~j3r6icDe2Z0f-6lqK(4d062>~Ak`z8IoXsoMUfBxtO4A{u|auQ=#6&x
zo+3R29Sp4uorVR8H8VPmUY1}~uz|@%`(I+T?2)zwF+J3hnNwGRX~HM3L@M+`-=h
zgwhrmNpc-3HIioK6R_5XGf~rOI!UW287LWH3S~Vsw-ojrDpeu*lUNd4Zx%wxd`;iN
zxE?BOjmoY%C+)Wo-x}iEXQ(xj8l(HI(f#&ssP@3OhS)FK$_vtnV!w!OP_)>$2tn-U
zO0&t+$;`Rd1_!tigv@t6KJw6GPzrY7Z+P7D1HTE|0at^)MW3p5uL+$(^1?NTRReC2
z;jRX~RXZr{zk_xGiJ}BkMt0g#GV&NLVE4IkA{j^D3Jl6NnFx}fTeyRN0ElF=+^+OT
z%AC~a<*8r@RPdZcex9q$l7$!VU_Gok-ZZM6k>CT8R|r1^^5+-|bg1td5D6nnMP`()YI};ZtH5n#esv8yFKo5BvDym5R%6)ac0g5_aY@dr7nO^Oi7keoGO3XBF?4l$o836H
z+4na$yGg?rvM{ivX$jw_UWI?5WX2d_%D9t@HalIxI>5eh#TJHf^{@`E4il@9d38-U
z4HIkO4H1!>-BD1=QQYH>@2Hg&Os+8}d4`wrOfTL2S+3>gKedE%py(%C8j0hbQbs6;
zEY@lUv5bG`-pc*Zsd~m749vbFWg(g
zy_|g5Z)UHdd3e4Mdx!fUT`s&{xOX-AqSBFLeG2nIz|#le*T6CLWYfHrPcKIyY9LxUq{a%<*)a3
zi**wdkE7g!3^_OmI3^ASPvkiLKSq-9kye5v3IgM?c8kK~VH}{9J&ORq`}Ucp=OBCv
z&lLALB3zrJ2SVaeCvhGsI5djhMO{6B?7Rzq$wvg*T^5K7L`{g&f+dE3r~3M>c5-rVn;CdcLnNB$sDgOpW=gLYkpS6Yq^%6`uv69r_IqTC^Sb^;YwcOUE<)i^=Ybh
z$u?nMBs(3MyVf4L3nQ>lsh`WN5N39>VUAqqIdZE-cmdl3dqb{rI7*suOW7kDW1?*j
zL;Z}4FZE$xqBbvTwJ)ii5Vh$qshvFX+o>bJo$ieqW0>hH&-Rt)`pWa+F<9()&ejg`
z>G%NV!~o{x0Oq9uOkn`?jRDN50nE!`ruItsYVEX}20Y`A>?p8FnH^e#vsi~DaWe$w%Qqt=35v>-JR!c{;T25MBNm>nZ;%d_Bo8fP5
zkxX9`PHGluL{`OCx$^=}QJ+_IRLsV%;)B?2QV;D>gk8Ezp)d+r<~{28(19XvSp3$$
z_shNu0`QTb40!IXAn+Jjx3?K|mpUAfhM$|^L5x9Q!w
zkA4tXLkWtgQTQ_4Y%kkk%Whvm>53>_<)iII+ig8gqHZ9T4kMNiA|}wH?ybWahhlBU
z=|VmM+5^a3k5+7Ocn*z3XUY4OL+@An-@EZIdRf0>ciLQJ0G5-|Qc_x^l11&}vB%}O
zuecod6PM#Yc>12h5M9A`i4+;vV5tKn@+hskc$w>9(c?>8`(5w7-{Y2ev5_(DpkkRS
zEb-`}mSPk=dZ@)1MUNf|)tB)hJ$fk6A{0G(DD+Dxdh}3NV-!6EN+f>Zo>L;U=X@uI
zLr95`i$jta3kZq!@UUnbH35?Ul9bIM7&4NdrI?C_gn)4%DKM$z)x$Vzpw$q}SNTfK
zAfm<(ZQ~!&`5H>9CeI?^8-SSKaQw!^zP~*f08&~sjr$g{?kr3z%p&waB!8$5md(@m(!aQ9@2>7oi+V=_ZWnhx%q})7(sN>Y_EisHIS6
zMOJ9m(vd~kDpKb)YX%|WCXDee-I-{F(#DaFFmZ$oa+s~M)C;IwJ
z<`9h$f5CThSft&PQJ#(*x*VSb<|R-rfbvu{B`8k~q5O?L$_dA#+yTm`BJen&4F2rC
z49c&7@=P=%D9;R`{AwTNL_VU-0m`SN=|0ND)ia=c7L@0rIYD`D2<2iQWuwP^;Lb&}
zebnZod^8zNN3+pNH|y_d~+{6#>fcP%1fzOJdFUCsg;x1R?@IBk)rKn*=@~utVSv2y_TgpwE6z;71*i(F|h;a(ON2Zwih
z`zjK?cr3<`rTDRohnO}u9(y8(LKdpi784Ri31ylM%48YERJZ{u2-L;!@vNu0*z4kdVa%7)ueMz;0_M#e;n@hy~)en$$CWeJ%gK&Jv<
zEgSR$v?K`-SweG6GsyH%l2^g-75S8$|5%!3)T)d-xu`3;0ur$O43SGmnn?4C0~m4q
z;+n_~rGd35KZNX%N~h$M9}?LiI+TGL5yvx>BZ_~Bq}=gOM?`)o?@k~;ly)cGDI||A
zciNr76LmPO$skWO%Poq6e_OCm5{I&sf~1R{WN%?
zZ!6pCwzjQr8{4UhNm}cf8S0U|CEv!m@+ai($2n@TgbAqx
zW#J;1?V=f<6)gf8^G8r4-z`2#qT$6_O>EOiy#CsHgTjY$tI>Y*m$Xr7K8Ce%W?v8z
z2WH_79Nszg;7t^I-xO1Q;mF?;eTpSm3jk?b{7b;&x;X1Ej>H=5@2Glzmf{l%M}Cvr
z`G1%k%}eaqEt-%>Xyh#Tq43tMjfo)>OBR#TxOCsb9oz$W@?F>@X-~Q>J$MtTc{=&S
zNeP{pY{65DRA!;9!3$Gr26wEZf4VD_mRTj_EQMTJX
z;0{C-7qjgnbjdYJ$ndd;jxYWx(aiwa?5{