fix start mode (#514)

pull/521/head
littletomatodonkey 2020-12-25 13:38:43 +08:00 committed by GitHub
parent ed39003110
commit 4472c87cf6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 28 additions and 28 deletions

View File

@ -202,7 +202,7 @@ If everything is ready, users can begin to train the network using the following
export PYTHONPATH=path_to_PaddleClas:$PYTHONPATH
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
--log_dir=R50_vd_distill_MV3_large_x1_0 \
tools/train.py \
-c ./configs/Distillation/R50_vd_distill_MV3_large_x1_0.yaml

View File

@ -523,7 +523,7 @@ Users can use the following command to start the training process, which can als
export PYTHONPATH=path_to_PaddleClas:$PYTHONPATH
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c ./configs/DataAugment/ResNet50_Cutout.yaml
```

View File

@ -95,7 +95,7 @@ After preparing the configuration file, The training process can be started in t
# PaddleClas starts multi-card and multi-process training through launch
# Specify the GPU running card number by setting FLAGS_selected_gpus
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c ./configs/ResNet/ResNet50_vd.yaml
```
@ -104,7 +104,7 @@ The configuration can be updated by adding the `-o` parameter.
```bash
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c ./configs/ResNet/ResNet50_vd.yaml \
-o use_mix=1 \
@ -121,7 +121,7 @@ The format of output log information is the same as above.
```
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c configs/ResNet/ResNet50.yaml \
-o pretrained_model="./pretrained/ResNet50_pretrained"
@ -137,7 +137,7 @@ Among them, `pretrained_model` is used to set the address to load the pretrained
```
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c configs/ResNet/ResNet50.yaml \
-o checkpoints="./output/ResNet/0/ppcls"

View File

@ -74,7 +74,7 @@ Parameters
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd.yaml
@ -92,7 +92,7 @@ The validation `Top1 Acc` curve is shown below.
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd_finetune.yaml
@ -122,7 +122,7 @@ Training script
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd_ssld_finetune.yaml
```
@ -137,7 +137,7 @@ Training script
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/MobileNetV3_large_x1_0_finetune.yaml
```
@ -154,7 +154,7 @@ Training script
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd_ssld_random_erasing_finetune.yaml
```
@ -191,7 +191,7 @@ Final training script
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/R50_vd_distill_MV3_large_x1_0.yaml
```

View File

@ -239,7 +239,7 @@ use_distillation: True
export PYTHONPATH=path_to_PaddleClas:$PYTHONPATH
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
--log_dir=R50_vd_distill_MV3_large_x1_0 \
tools/train.py \
-c ./configs/Distillation/R50_vd_distill_MV3_large_x1_0.yaml

View File

@ -531,7 +531,7 @@ new_batch = cutmix_op(batch)
export PYTHONPATH=path_to_PaddleClas:$PYTHONPATH
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
--log_dir=ResNet50_Cutout \
tools/train.py \
-c ./configs/DataAugment/ResNet50_Cutout.yaml

View File

@ -95,7 +95,7 @@ python tools/eval_multi_platform.py \
# 通过设置FLAGS_selected_gpus 指定GPU运行卡号
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c ./configs/ResNet/ResNet50_vd.yaml
```
@ -104,7 +104,7 @@ python -m paddle.distributed.launch \
```bash
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c ./configs/ResNet/ResNet50_vd.yaml \
-o use_mix=1 \
@ -119,7 +119,7 @@ python -m paddle.distributed.launch \
```
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c configs/ResNet/ResNet50.yaml \
-o pretrained_model="./pretrained/ResNet50_pretrained"
@ -136,7 +136,7 @@ python -m paddle.distributed.launch \
```
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c configs/ResNet/ResNet50.yaml \
-o checkpoints="./output/ResNet/0/ppcls"
@ -151,7 +151,7 @@ python -m paddle.distributed.launch \
```bash
python -m paddle.distributed.launch \
--selected_gpus="0" \
--gpus="0" \
tools/eval.py \
-c ./configs/eval.yaml \
-o ARCHITECTURE.name="ResNet50_vd" \

View File

@ -74,7 +74,7 @@ python tools/download.py -a MobileNetV3_large_x1_0 -p ./pretrained -d True
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd.yaml
@ -92,7 +92,7 @@ python -m paddle.distributed.launch \
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd_finetune.yaml
@ -120,7 +120,7 @@ pretrained_model: "./pretrained/ResNet50_vd_ssld_pretrained"
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd_ssld_finetune.yaml
```
@ -135,7 +135,7 @@ python -m paddle.distributed.launch \
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/MobileNetV3_large_x1_0_finetune.yaml
```
@ -151,7 +151,7 @@ python -m paddle.distributed.launch \
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/ResNet50_vd_ssld_random_erasing_finetune.yaml
```
@ -190,7 +190,7 @@ TRAIN:
```shell
export CUDA_VISIBLE_DEVICES=0
python -m paddle.distributed.launch \
    --selected_gpus="0" \
    --gpus="0" \
    tools/train.py \
        -c ./configs/quick_start/R50_vd_distill_MV3_large_x1_0.yaml
```

View File

@ -1,4 +1,4 @@
python -m paddle.distributed.launch \
--selected_gpus="0" \
--gpus="0" \
tools/eval.py \
-c ./configs/eval.yaml

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c ./configs/ResNet/ResNet50.yaml \
-o print_interval=10

View File

@ -3,7 +3,7 @@
export FLAGS_fraction_of_gpu_memory_to_use=0.80
python3.7 -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
--gpus="0,1,2,3" \
tools/train.py \
-c configs/ResNet/ResNet50.yaml \
-o TRAIN.batch_size=256 \