From 24372cc6e5deac02649bc1570314af4d047e3bcc Mon Sep 17 00:00:00 2001 From: gaotingquan Date: Thu, 28 Apr 2022 14:19:01 +0000 Subject: [PATCH] update: update the default gpu num to 8 when using AdamW --- .../CSWinTransformer/CSWinTransformer_base_224.yaml | 7 ++++--- .../CSWinTransformer/CSWinTransformer_base_384.yaml | 7 ++++--- .../CSWinTransformer_large_224.yaml | 7 ++++--- .../CSWinTransformer_large_384.yaml | 7 ++++--- .../CSWinTransformer_small_224.yaml | 7 ++++--- .../CSWinTransformer/CSWinTransformer_tiny_224.yaml | 7 ++++--- .../DeiT/DeiT_base_distilled_patch16_224.yaml | 7 ++++--- .../DeiT/DeiT_base_distilled_patch16_384.yaml | 7 ++++--- .../ImageNet/DeiT/DeiT_base_patch16_224.yaml | 7 ++++--- .../ImageNet/DeiT/DeiT_base_patch16_384.yaml | 7 ++++--- .../DeiT/DeiT_small_distilled_patch16_224.yaml | 6 +++--- .../ImageNet/DeiT/DeiT_small_patch16_224.yaml | 7 ++++--- .../DeiT/DeiT_tiny_distilled_patch16_224.yaml | 7 ++++--- .../ImageNet/DeiT/DeiT_tiny_patch16_224.yaml | 7 ++++--- ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml | 7 ++++--- ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml | 7 ++++--- ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml | 7 ++++--- ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml | 7 ++++--- ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml | 7 ++++--- ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml | 7 ++++--- ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml | 7 ++++--- .../SwinTransformer_base_patch4_window12_384.yaml | 7 ++++--- .../SwinTransformer_base_patch4_window7_224.yaml | 7 ++++--- .../SwinTransformer_large_patch4_window12_384.yaml | 7 ++++--- .../SwinTransformer_large_patch4_window7_224.yaml | 7 ++++--- .../SwinTransformer_small_patch4_window7_224.yaml | 7 ++++--- .../SwinTransformer_tiny_patch4_window7_224.yaml | 7 ++++--- ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml | 7 ++++--- ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml | 7 ++++--- ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml | 7 ++++--- ppcls/configs/ImageNet/Twins/pcpvt_base.yaml | 7 
++++--- ppcls/configs/ImageNet/Twins/pcpvt_large.yaml | 7 ++++--- ppcls/configs/ImageNet/Twins/pcpvt_small.yaml | 7 ++++--- ppcls/engine/engine.py | 13 +++++++++---- 34 files changed, 140 insertions(+), 103 deletions(-) diff --git a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml index 4655e02b3..a7697840e 100644 --- a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml +++ b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_224.yaml @@ -42,11 +42,12 @@ Optimizer: no_weight_decay_name: pos_embed cls_token .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1.25e-4 - eta_min: 1.25e-6 + learning_rate: 2.5e-4 + eta_min: 2.5e-6 warmup_epoch: 20 - warmup_start_lr: 1.25e-7 + warmup_start_lr: 2.5e-7 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml index 1e6b1f79f..a7100289c 100644 --- a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml +++ b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_base_384.yaml @@ -42,11 +42,12 @@ Optimizer: no_weight_decay_name: pos_embed cls_token .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 6.25e-5 - eta_min: 6.25e-7 + learning_rate: 1.25e-4 + eta_min: 1.25e-6 warmup_epoch: 20 - warmup_start_lr: 6.25e-8 + warmup_start_lr: 1.25e-7 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml index ddeacadf0..7c96343df 100644 --- a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml +++ b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_224.yaml @@ -42,11 +42,12 @@ Optimizer: no_weight_decay_name: 
pos_embed cls_token .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1.25e-4 - eta_min: 1.25e-6 + learning_rate: 2.5e-4 + eta_min: 2.5e-6 warmup_epoch: 20 - warmup_start_lr: 1.25e-7 + warmup_start_lr: 2.5e-7 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml index ab477ef2e..4b682fec6 100644 --- a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml +++ b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_large_384.yaml @@ -42,11 +42,12 @@ Optimizer: no_weight_decay_name: pos_embed cls_token .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 3.125e-5 - eta_min: 3.125e-7 + learning_rate: 6.25e-5 + eta_min: 6.25e-7 warmup_epoch: 20 - warmup_start_lr: 3.125e-8 + warmup_start_lr: 6.25e-8 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml index ec3c5a145..a191f4160 100644 --- a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml +++ b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_small_224.yaml @@ -42,11 +42,12 @@ Optimizer: no_weight_decay_name: pos_embed cls_token .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 2.5e-4 - eta_min: 2.5e-6 + learning_rate: 5e-4 + eta_min: 5e-6 warmup_epoch: 20 - warmup_start_lr: 2.5e-7 + warmup_start_lr: 5e-7 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml index 3e3f92525..3a2be2837 100644 --- a/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml +++ 
b/ppcls/configs/ImageNet/CSWinTransformer/CSWinTransformer_tiny_224.yaml @@ -42,11 +42,12 @@ Optimizer: no_weight_decay_name: pos_embed cls_token .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml index 979a04a38..8c3cc4c34 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml +++ b/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_224.yaml @@ -40,11 +40,12 @@ Optimizer: no_weight_decay_name: norm cls_token pos_embed dist_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml index 859f57d72..0b8c2e808 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml +++ b/ppcls/configs/ImageNet/DeiT/DeiT_base_distilled_patch16_384.yaml @@ -40,11 +40,12 @@ Optimizer: no_weight_decay_name: norm cls_token pos_embed dist_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml index 3cdd10202..938916caa 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml +++ 
b/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_224.yaml @@ -40,11 +40,12 @@ Optimizer: no_weight_decay_name: norm cls_token pos_embed dist_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml index 88a8fbae9..4cbe6ffde 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml +++ b/ppcls/configs/ImageNet/DeiT/DeiT_base_patch16_384.yaml @@ -40,11 +40,12 @@ Optimizer: no_weight_decay_name: norm cls_token pos_embed dist_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml index 54d962e68..d5ba0cee7 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml +++ b/ppcls/configs/ImageNet/DeiT/DeiT_small_distilled_patch16_224.yaml @@ -41,10 +41,10 @@ Optimizer: one_dim_param_no_weight_decay: True lr: name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml index 05c3ac1f3..a167c896e 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml +++ b/ppcls/configs/ImageNet/DeiT/DeiT_small_patch16_224.yaml @@ -40,11 +40,12 @@ Optimizer: no_weight_decay_name: norm cls_token pos_embed dist_token 
one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml index f66617613..319e17025 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml +++ b/ppcls/configs/ImageNet/DeiT/DeiT_tiny_distilled_patch16_224.yaml @@ -40,11 +40,12 @@ Optimizer: no_weight_decay_name: norm cls_token pos_embed dist_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml b/ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml index 647050a77..1234d79b6 100644 --- a/ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml +++ b/ppcls/configs/ImageNet/DeiT/DeiT_tiny_patch16_224.yaml @@ -40,11 +40,12 @@ Optimizer: no_weight_decay_name: norm cls_token pos_embed dist_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 1e-3 - eta_min: 1e-5 + learning_rate: 2e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval DataLoader: diff --git a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml index 6c0854cb4..27fc20b99 100644 --- a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml +++ b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B0.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + 
learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml index 42134c740..20fa39773 100644 --- a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml +++ b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B1.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml index 4d0d5a432..cda94496e 100644 --- a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml +++ b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml index a5feb260b..2d48178f0 100644 --- a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml +++ b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B2_Linear.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml 
b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml index be300aca6..581a70605 100644 --- a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml +++ b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B3.yaml @@ -44,11 +44,12 @@ Optimizer: no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml index b6a895339..92da84d1e 100644 --- a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml +++ b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B4.yaml @@ -44,11 +44,12 @@ Optimizer: no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml index 9d36b2807..4bb2449a4 100644 --- a/ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml +++ b/ppcls/configs/ImageNet/PVTV2/PVT_V2_B5.yaml @@ -44,11 +44,12 @@ Optimizer: no_weight_decay_name: pos_embed1 pos_embed2 pos_embed3 pos_embed4 cls_token one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 5e-6 + learning_rate: 1e-3 + eta_min: 1e-5 warmup_epoch: 20 - warmup_start_lr: 5e-7 + warmup_start_lr: 1e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml index 4dd0ac4cf..afc3fdcd2 100644 --- 
a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml +++ b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window12_384.yaml @@ -41,11 +41,12 @@ Optimizer: no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 20 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml index a42dea1f9..4920fae6c 100644 --- a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml +++ b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_base_patch4_window7_224.yaml @@ -41,11 +41,12 @@ Optimizer: no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 20 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml index 36b5e5e38..a6dd74267 100644 --- a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml +++ b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window12_384.yaml @@ -41,11 +41,12 @@ Optimizer: no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 20 - warmup_start_lr: 1e-6 + 
warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml index 96a9befd2..564da72f1 100644 --- a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml +++ b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_large_patch4_window7_224.yaml @@ -41,11 +41,12 @@ Optimizer: no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 20 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml index ffbbcf080..ba42f1efb 100644 --- a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml +++ b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_small_patch4_window7_224.yaml @@ -41,11 +41,12 @@ Optimizer: no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 20 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml index 066db715d..26fa0ba61 100644 --- a/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml +++ b/ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml @@ -41,11 +41,12 @@ Optimizer: 
no_weight_decay_name: absolute_pos_embed relative_position_bias_table .bias norm one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 20 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml b/ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml index 74c402ee7..36e5b086d 100644 --- a/ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml +++ b/ppcls/configs/ImageNet/Twins/alt_gvt_base.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml b/ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml index ca66e9a33..6e19d6461 100644 --- a/ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml +++ b/ppcls/configs/ImageNet/Twins/alt_gvt_large.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml b/ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml index 9e97c0f99..66235960a 100644 --- a/ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml +++ b/ppcls/configs/ImageNet/Twins/alt_gvt_small.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block one_dim_param_no_weight_decay: 
True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/Twins/pcpvt_base.yaml b/ppcls/configs/ImageNet/Twins/pcpvt_base.yaml index 7831e9289..96745495a 100644 --- a/ppcls/configs/ImageNet/Twins/pcpvt_base.yaml +++ b/ppcls/configs/ImageNet/Twins/pcpvt_base.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/Twins/pcpvt_large.yaml b/ppcls/configs/ImageNet/Twins/pcpvt_large.yaml index 8e160b3c2..ca4baf942 100644 --- a/ppcls/configs/ImageNet/Twins/pcpvt_large.yaml +++ b/ppcls/configs/ImageNet/Twins/pcpvt_large.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 5 - warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/configs/ImageNet/Twins/pcpvt_small.yaml b/ppcls/configs/ImageNet/Twins/pcpvt_small.yaml index 582382d4d..a5e5f7e05 100644 --- a/ppcls/configs/ImageNet/Twins/pcpvt_small.yaml +++ b/ppcls/configs/ImageNet/Twins/pcpvt_small.yaml @@ -43,11 +43,12 @@ Optimizer: no_weight_decay_name: norm cls_token proj.0.weight proj.1.weight proj.2.weight proj.3.weight pos_block one_dim_param_no_weight_decay: True lr: + # for 8 cards name: Cosine - learning_rate: 5e-4 - eta_min: 1e-5 + learning_rate: 1e-3 + eta_min: 2e-5 warmup_epoch: 5 - 
 warmup_start_lr: 1e-6 + warmup_start_lr: 2e-6 # data loader for train and eval diff --git a/ppcls/engine/engine.py b/ppcls/engine/engine.py index b36aeb70c..aacde2f76 100644 --- a/ppcls/engine/engine.py +++ b/ppcls/engine/engine.py @@ -250,12 +250,17 @@ class Engine(object): level=amp_level, save_dtype='float32') - # for distributed + # check the gpu num world_size = dist.get_world_size() self.config["Global"]["distributed"] = world_size != 1 - if world_size != 4 and self.mode == "train": - msg = f"The training strategy in config files provided by PaddleClas is based on 4 gpus. But the number of gpus is {world_size} in current training. Please modify the stategy (learning rate, batch size and so on) if use config files in PaddleClas to train." - logger.warning(msg) + if self.mode == "train": + std_gpu_num = 8 if self.config["Optimizer"][ + "name"] == "AdamW" else 4 + if world_size != std_gpu_num: + msg = f"The training strategy provided by PaddleClas is based on {std_gpu_num} gpus. But the number of gpus is {world_size} in current training. Please modify the strategy (learning rate, batch size and so on) if you use this config to train." + logger.warning(msg) + + # for distributed if self.config["Global"]["distributed"]: dist.init_parallel_env() self.model = paddle.DataParallel(self.model)