From d865e2cefdc92f98a27358da598ac1d835857fff Mon Sep 17 00:00:00 2001
From: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
Date: Sat, 8 Aug 2020 01:37:21 +0800
Subject: [PATCH] Rename CosineAnealing to CosineAnnealing (#57)

---
 .../dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py          | 2 +-
 .../dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py   | 2 +-
 docs/config.md                                                  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/configs/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py b/configs/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py
index be9ffc0a..8add4060 100644
--- a/configs/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py
+++ b/configs/dynamic_voxelization/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py
@@ -27,7 +27,7 @@ optimizer = dict(
     weight_decay=0.001)
 lr_config = dict(
     _delete_=True,
-    policy='CosineAnealing',
+    policy='CosineAnnealing',
     warmup='linear',
     warmup_iters=1000,
     warmup_ratio=1.0 / 10,
diff --git a/configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py b/configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py
index 31b1fe72..c7cc6001 100644
--- a/configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py
+++ b/configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_2x8_80e_kitti-3d-3class.py
@@ -228,7 +228,7 @@ optimizer = dict(type='AdamW', lr=0.003, betas=(0.95, 0.99), weight_decay=0.01)
 # max_norm=10 is better for SECOND
 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
 lr_config = dict(
-    policy='CosineAnealing',
+    policy='CosineAnnealing',
     warmup='linear',
     warmup_iters=1000,
     warmup_ratio=1.0 / 10,
diff --git a/docs/config.md b/docs/config.md
index d4c5cb00..b83b4f8d 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -324,7 +324,7 @@ optimizer_config = dict(  # Config used to build the optimizer hook, refer to ht
     max_norm=10,  # max norm of the gradients
     norm_type=2))  # Type of the used p-norm. Can be 'inf' for infinity norm.
 lr_config = dict(  # Learning rate scheduler config used to register LrUpdater hook
-    policy='step',  # The policy of scheduler, also support CosineAnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9.
+    policy='step',  # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9.
     warmup=None,  # The warmup policy, also support `exp` and `constant`.
     step=[24, 32])  # Steps to decay the learning rate
 checkpoint_config = dict(  # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation.
-- 
GitLab
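
For reference, a minimal sketch of the learning-rate schedule block as it reads after this rename, assembled only from the lines visible in the hunks above. In mmcv the `policy` string is mapped to the corresponding `*LrUpdaterHook`, so the spelling must match the registered hook name; keys that come after `warmup_ratio` in the real configs fall outside the shown hunks and are left out here.

# Sketch only, not part of the patch; uses just the keys shown in the hunks above.
lr_config = dict(
    policy='CosineAnnealing',  # must match an LrUpdater hook name in mmcv
    warmup='linear',           # linear warmup over the first 1000 iterations
    warmup_iters=1000,
    warmup_ratio=1.0 / 10,     # warmup starts at 1/10 of the base learning rate
    # ... remaining schedule keys are not shown in this patch
)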