benchmark scripts

parent 766a816d3e
commit a1c322556a
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-set -x
-
-CFG=$1
-EPOCH=$2
-DATASET=$3 # imagenet or places205
-GPUS=${GPUS:-1}
-PORT=${PORT:-29500}
-PY_ARGS=${@:4}
-
-WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
-CHECKPOINT=$WORK_DIR/epoch_${EPOCH}.pth
-WORK_DIR_EVAL=$WORK_DIR/${DATASET}_at_epoch_${EPOCH}/
-
-# extract backbone
-if [ ! -f "${CHECKPOINT::(-4)}_extracted.pth" ]; then
-    python tools/extract_backbone_weights.py $CHECKPOINT \
-        --save-path ${CHECKPOINT::(-4)}_extracted.pth
-fi
-
-# train
-python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
-    tools/train.py \
-    configs/linear_classification/${DATASET}/r50_multihead.py \
-    --pretrained ${CHECKPOINT::(-4)}_extracted.pth \
-    --work_dir ${WORK_DIR_EVAL} --seed 0 --launcher="pytorch" ${PY_ARGS}
-
-# test
-python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
-    tools/test.py \
-    configs/linear_classification/${DATASET}/r50_multihead.py \
-    ${WORK_DIR_EVAL}/latest.pth \
-    --work_dir ${WORK_DIR_EVAL} --launcher="pytorch"
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+CFG=$1 # use cfgs under "configs/linear_classification/"
+PRETRAIN=$2
+PY_ARGS=${@:3}
+GPUS=1 # in the standard setting, GPUS=1
+PORT=${PORT:-29500}
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+    tools/train.py \
+    $CFG \
+    --pretrained $PRETRAIN \
+    --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS}
+
+# test
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+    tools/test.py \
+    $CFG \
+    $WORK_DIR/latest.pth \
+    --work_dir $WORK_DIR --launcher="pytorch"
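The `WORK_DIR` line in the new script derives the evaluation directory from the config path and the basename of the pretrained weights, so runs with different pretrains do not overwrite each other. A minimal illustration of what the expression evaluates to (the `PRETRAIN` path is a hypothetical example, not a file from this commit):

```shell
CFG=configs/linear_classification/imagenet/r50_multihead.py
PRETRAIN=work_dirs/selfsup/moco/r50_v1/moco_r50_v1_extracted.pth   # hypothetical
echo "$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
# -> work_dirs/linear_classification/imagenet/r50_multihead/moco_r50_v1_extracted.pth
```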
@@ -3,37 +3,23 @@
 set -e
 set -x

-CFG=$1
-EPOCH=$2
-PERCENT=$3
-PY_ARGS=${@:4}
-GPUS=${GPUS:-8}
+CFG=$1 # use cfgs under "configs/classification/imagenet_*percent/"
+PRETRAIN=$2
+PY_ARGS=${@:3}
+GPUS=8 # in the standard setting, GPUS=8

-WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
-CHECKPOINT=$WORK_DIR/epoch_${EPOCH}.pth
-WORK_DIR_EVAL=$WORK_DIR/imagenet_semi_${PERCENT}percent_at_epoch_${EPOCH}/
-
-if [ ! "$PERCENT" == "1" ] && [ ! "$PERCENT" == 10 ]; then
-    echo "ERROR: PERCENT must in {1, 10}"
-    exit
-fi
-
-# extract backbone
-if [ ! -f "${CHECKPOINT::(-4)}_extracted.pth" ]; then
-    python tools/extract_backbone_weights.py $CHECKPOINT \
-        --save-path ${CHECKPOINT::(-4)}_extracted.pth
-fi
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"

 # train
 python -m torch.distributed.launch --nproc_per_node=$GPUS \
     tools/train.py \
-    configs/classification/imagenet_${PERCENT}percent/r50.py \
-    --pretrained ${CHECKPOINT::(-4)}_extracted.pth \
-    --work_dir ${WORK_DIR_EVAL} --seed 0 --launcher="pytorch" ${PY_ARGS}
+    $CFG \
+    --pretrained $PRETRAIN \
+    --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS}

 # test
 python -m torch.distributed.launch --nproc_per_node=$GPUS \
     tools/test.py \
-    configs/classification/imagenet_${PERCENT}percent/r50.py \
-    ${WORK_DIR_EVAL}/latest.pth \
-    --work_dir ${WORK_DIR_EVAL} --launcher="pytorch"
+    $CFG \
+    $WORK_DIR/latest.pth \
+    --work_dir $WORK_DIR --launcher="pytorch"
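The semi-supervised benchmark now takes the config file and an already-extracted pretrained model directly, instead of an epoch number and a percent flag. A hedged usage sketch (the script's filename is not shown in this diff, and both paths are illustrative):

```shell
# assumed name benchmarks/dist_train_semi.sh; adjust to the actual script location
bash benchmarks/dist_train_semi.sh \
    configs/classification/imagenet_1percent/r50.py \
    pretrains/moco_r50_v1_extracted.pth
```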
@@ -1,15 +0,0 @@
-#!/bin/bash
-set -e
-set -x
-
-CFG=$1
-EPOCH=$2
-FEAT_LIST=$3
-GPUS=$4
-WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
-
-bash tools/dist_extract.sh $CFG $WORK_DIR/epoch_${EPOCH}.pth $GPUS
-
-bash benchmarks/eval_svm.sh $WORK_DIR $FEAT_LIST
-
-bash benchmarks/eval_svm_lowshot.sh $WORK_DIR $FEAT_LIST
@@ -0,0 +1,20 @@
+#!/bin/bash
+set -e
+set -x
+
+CFG=$1
+EPOCH=$2
+FEAT_LIST=$3 # e.g.: "feat5", "feat4 feat5". If left empty, the default is "feat5"
+GPUS=${4:-8}
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+if [ ! -f $WORK_DIR/epoch_${EPOCH}.pth ]; then
+    echo "ERROR: File does not exist: $WORK_DIR/epoch_${EPOCH}.pth"
+    exit
+fi
+
+bash tools/dist_extract.sh $CFG $GPUS --checkpoint $WORK_DIR/epoch_${EPOCH}.pth
+
+bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR $FEAT_LIST
+
+bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR $FEAT_LIST
@@ -0,0 +1,20 @@
+#!/bin/bash
+set -e
+set -x
+
+CFG=$1
+PRETRAIN=$2 # pretrained model or "random" (random init)
+FEAT_LIST=$3 # e.g.: "feat5", "feat4 feat5". If left empty, the default is "feat5"
+GPUS=${4:-8}
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+if [ ! -f $PRETRAIN ] && [ "$PRETRAIN" != "random" ]; then
+    echo "ERROR: PRETRAIN should be a file or a string \"random\", got: $PRETRAIN"
+    exit
+fi
+
+bash tools/dist_extract.sh $CFG $GPUS --pretrained $PRETRAIN
+
+bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR $FEAT_LIST
+
+bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR $FEAT_LIST
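This SVM benchmark accepts either a pretrained weight file or the literal string `random` to score a randomly initialized backbone, plus an optional list of feature levels. A hedged usage sketch (the script's filename and the config path are assumptions, not taken from this diff):

```shell
# assumed name benchmarks/dist_test_svm_pretrain.sh; config and weight paths are illustrative
bash benchmarks/dist_test_svm_pretrain.sh configs/selfsup/moco/r50_v1.py pretrains/moco_r50_v1_extracted.pth "feat4 feat5" 8
bash benchmarks/dist_test_svm_pretrain.sh configs/selfsup/moco/r50_v1.py random "feat5" 8
```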
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-set -x
-
-PARTITION=$1
-CFG=$2
-EPOCH=$3
-DATASET=$4 # imagenet or places205
-PY_ARGS=${@:5}
-JOB_NAME="openselfsup"
-GPUS=${GPUS:-1}
-GPUS_PER_NODE=${GPUS_PER_NODE:-1}
-CPUS_PER_TASK=${CPUS_PER_TASK:-5}
-SRUN_ARGS=${SRUN_ARGS:-""}
-
-WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
-CHECKPOINT=$WORK_DIR/epoch_${EPOCH}.pth
-WORK_DIR_EVAL=$WORK_DIR/${DATASET}_at_epoch_${EPOCH}/
-
-# extract backbone
-if [ ! -f "${CHECKPOINT::(-4)}_extracted.pth" ]; then
-    srun -p ${PARTITION} \
-        python tools/extract_backbone_weights.py $CHECKPOINT \
-        --save-path ${CHECKPOINT::(-4)}_extracted.pth
-fi
-
-# train
-GLOG_vmodule=MemcachedClient=-1 \
-srun -p ${PARTITION} \
-    --job-name=${JOB_NAME} \
-    --gres=gpu:${GPUS_PER_NODE} \
-    --ntasks=${GPUS} \
-    --ntasks-per-node=${GPUS_PER_NODE} \
-    --cpus-per-task=${CPUS_PER_TASK} \
-    --kill-on-bad-exit=1 \
-    ${SRUN_ARGS} \
-    python -u tools/train.py \
-    configs/linear_classification/${DATASET}/r50_multihead.py \
-    --pretrained ${CHECKPOINT::(-4)}_extracted.pth \
-    --work_dir ${WORK_DIR_EVAL} --seed 0 --launcher="slurm" ${PY_ARGS}
-
-# test
-GLOG_vmodule=MemcachedClient=-1 \
-srun -p ${PARTITION} \
-    --job-name=${JOB_NAME} \
-    --gres=gpu:${GPUS_PER_NODE} \
-    --ntasks=${GPUS} \
-    --ntasks-per-node=${GPUS_PER_NODE} \
-    --cpus-per-task=${CPUS_PER_TASK} \
-    --kill-on-bad-exit=1 \
-    ${SRUN_ARGS} \
-    python -u tools/test.py \
-    configs/linear_classification/${DATASET}/r50_multihead.py \
-    ${WORK_DIR_EVAL}/latest.pth \
-    --work_dir ${WORK_DIR_EVAL} --launcher="slurm"
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+PRETRAIN=$3
+PY_ARGS=${@:4}
+JOB_NAME="openselfsup"
+GPUS=1 # in the standard setting, GPUS=1
+GPUS_PER_NODE=${GPUS_PER_NODE:-1}
+CPUS_PER_TASK=${CPUS_PER_TASK:-5}
+SRUN_ARGS=${SRUN_ARGS:-""}
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/train.py \
+    $CFG \
+    --pretrained $PRETRAIN \
+    --work_dir $WORK_DIR --seed 0 --launcher="slurm" ${PY_ARGS}
+
+# test
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --cpus-per-task=${CPUS_PER_TASK} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/test.py \
+    $CFG \
+    $WORK_DIR/latest.pth \
+    --work_dir $WORK_DIR --launcher="slurm"
@@ -5,29 +5,15 @@ set -x

 PARTITION=$1
 CFG=$2
-EPOCH=$3
-PERCENT=$4
-PY_ARGS=${@:5}
+PRETRAIN=$3
+PY_ARGS=${@:4}
 JOB_NAME="openselfsup"
-GPUS=${GPUS:-8}
+GPUS=8 # in the standard setting, GPUS=8
 GPUS_PER_NODE=${GPUS_PER_NODE:-8}
 CPUS_PER_TASK=${CPUS_PER_TASK:-5}
 SRUN_ARGS=${SRUN_ARGS:-""}

-WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
-CHECKPOINT=$WORK_DIR/epoch_${EPOCH}.pth
-WORK_DIR_EVAL=$WORK_DIR/imagenet_semi_${PERCENT}percent_at_epoch_${EPOCH}/
-
-if [ ! "$PERCENT" == "1" ] && [ ! "$PERCENT" == 10 ]; then
-    echo "ERROR: PERCENT must in {1, 10}"
-    exit
-fi
-# extract backbone
-if [ ! -f "${CHECKPOINT::(-4)}_extracted.pth" ]; then
-    srun -p ${PARTITION} \
-        python tools/extract_backbone_weights.py $CHECKPOINT \
-        --save-path ${CHECKPOINT::(-4)}_extracted.pth
-fi
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"

 # train
 GLOG_vmodule=MemcachedClient=-1 \
@@ -40,9 +26,9 @@ srun -p ${PARTITION} \
     --kill-on-bad-exit=1 \
     ${SRUN_ARGS} \
     python -u tools/train.py \
-    configs/classification/imagenet_${PERCENT}percent/r50.py \
-    --pretrained ${CHECKPOINT::(-4)}_extracted.pth \
-    --work_dir ${WORK_DIR_EVAL} --seed 0 --launcher="slurm" ${PY_ARGS}
+    $CFG \
+    --pretrained $PRETRAIN \
+    --work_dir $WORK_DIR --seed 0 --launcher="slurm" ${PY_ARGS}

 # test
 GLOG_vmodule=MemcachedClient=-1 \
@@ -55,6 +41,6 @@ srun -p ${PARTITION} \
     --kill-on-bad-exit=1 \
     ${SRUN_ARGS} \
     python -u tools/test.py \
-    configs/classification/imagenet_${PERCENT}percent/r50.py \
-    ${WORK_DIR_EVAL}/latest.pth \
-    --work_dir ${WORK_DIR_EVAL} --launcher="slurm"
+    $CFG \
+    $WORK_DIR/latest.pth \
+    --work_dir $WORK_DIR --launcher="slurm"
@@ -4,12 +4,13 @@ set -x

 PARTITION=$1
 CFG=$2
-EPOCH=$3
-FEAT=$4
+PRETRAIN=$3
+FEAT_LIST=$4
+GPUS=${5:-8}
 WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/

-bash tools/srun_extract.sh $PARTITION $CFG $WORK_DIR/epoch_${EPOCH}.pth
+bash tools/srun_extract.sh $PARTITION $CFG $GPUS --pretrained $PRETRAIN

-srun -p $PARTITION bash benchmarks/eval_svm.sh $WORK_DIR $FEAT
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR $FEAT_LIST

-srun -p $PARTITION bash benchmarks/eval_svm.sh $WORK_DIR $FEAT
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR $FEAT_LIST
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+EPOCH=$3
+FEAT_LIST=$4 # e.g.: "feat5", "feat4 feat5". If left empty, the default is "feat5"
+GPUS=${5:-8}
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+if [ ! -f $WORK_DIR/epoch_${EPOCH}.pth ]; then
+    echo "ERROR: File does not exist: $WORK_DIR/epoch_${EPOCH}.pth"
+    exit
+fi
+
+bash tools/srun_extract.sh $PARTITION $CFG $GPUS --checkpoint $WORK_DIR/epoch_${EPOCH}.pth
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR $FEAT_LIST
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR $FEAT_LIST
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+PRETRAIN=$3 # pretrained model or "random" (random init)
+FEAT_LIST=$4 # e.g.: "feat5", "feat4 feat5". If left empty, the default is "feat5"
+GPUS=${5:-8}
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+if [ ! -f $PRETRAIN ] && [ "$PRETRAIN" != "random" ]; then
+    echo "ERROR: PRETRAIN should be a file or a string \"random\", got: $PRETRAIN"
+    exit
+fi
+
+bash tools/srun_extract.sh $PARTITION $CFG $GPUS --pretrained $PRETRAIN
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR $FEAT_LIST
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR $FEAT_LIST
@@ -0,0 +1,60 @@
+_base_ = '../../base.py'
+# model settings
+model = dict(
+    type='Classification',
+    pretrained=None,
+    with_sobel=True,
+    backbone=dict(
+        type='ResNet',
+        depth=50,
+        in_channels=2,
+        out_indices=[4],  # 0: conv-1, x: stage-x
+        norm_cfg=dict(type='SyncBN')),
+    head=dict(
+        type='ClsHead', with_avg_pool=True, in_channels=2048,
+        num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+    type='ImageNet',
+    memcached=True,
+    mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled_10percent.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomHorizontalFlip'),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+    dict(type='Resize', size=256),
+    dict(type='CenterCrop', size=224),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+    imgs_per_gpu=32,  # total 256
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_train_list, root=data_train_root,
+            **data_source_cfg),
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_test_list, root=data_test_root, **data_source_cfg),
+        pipeline=test_pipeline))
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
+                 paramwise_options={'\Ahead.': dict(lr_mult=10)})
+# learning policy
+lr_config = dict(policy='step', step=[18, 24], gamma=0.2)
+checkpoint_config = dict(interval=2)
+# runtime settings
+total_epochs = 30
@@ -0,0 +1,60 @@
+_base_ = '../../base.py'
+# model settings
+model = dict(
+    type='Classification',
+    pretrained=None,
+    with_sobel=True,
+    backbone=dict(
+        type='ResNet',
+        depth=50,
+        in_channels=2,
+        out_indices=[4],  # 0: conv-1, x: stage-x
+        norm_cfg=dict(type='SyncBN')),
+    head=dict(
+        type='ClsHead', with_avg_pool=True, in_channels=2048,
+        num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+    type='ImageNet',
+    memcached=True,
+    mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled_1percent.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomHorizontalFlip'),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+    dict(type='Resize', size=256),
+    dict(type='CenterCrop', size=224),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+    imgs_per_gpu=32,  # total 256
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_train_list, root=data_train_root,
+            **data_source_cfg),
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_test_list, root=data_test_root, **data_source_cfg),
+        pipeline=test_pipeline))
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
+                 paramwise_options={'\Ahead.': dict(lr_mult=100)})
+# learning policy
+lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
+checkpoint_config = dict(interval=2)
+# runtime settings
+total_epochs = 20
@@ -0,0 +1,89 @@
+_base_ = '../../base.py'
+# model settings
+model = dict(
+    type='Classification',
+    pretrained=None,
+    frozen_backbone=True,
+    with_sobel=True,
+    backbone=dict(
+        type='ResNet',
+        depth=50,
+        in_channels=2,
+        out_indices=[0, 1, 2, 3, 4],  # 0: conv-1, x: stage-x
+        norm_cfg=dict(type='BN')),
+    head=dict(
+        type='MultiClsHead',
+        pool_type='specified',
+        in_indices=[0, 1, 2, 3, 4],
+        with_last_layer_unpool=True,
+        backbone='resnet50',
+        norm_cfg=dict(type='BN', momentum=0.1, affine=False),
+        num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+    type='ImageNet',
+    memcached=False,
+    mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomHorizontalFlip'),
+    dict(
+        type='ColorJitter',
+        brightness=0.4,
+        contrast=0.4,
+        saturation=0.4,
+        hue=0.),
+    dict(type='ToTensor'),
+    dict(type='Lighting'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+    dict(type='Resize', size=256),
+    dict(type='CenterCrop', size=224),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+    imgs_per_gpu=256,  # total 256
+    workers_per_gpu=8,
+    train=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_train_list, root=data_train_root,
+            **data_source_cfg),
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_test_list, root=data_test_root, **data_source_cfg),
+        pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+    dict(
+        type='ValidateHook',
+        dataset=data['val'],
+        initial=True,
+        interval=10,
+        imgs_per_gpu=128,
+        workers_per_gpu=4,
+        eval_param=dict(topk=(1, )))
+]
+# optimizer
+optimizer = dict(
+    type='SGD',
+    lr=0.01,
+    momentum=0.9,
+    weight_decay=0.0001,
+    paramwise_options=dict(norm_decay_mult=0.),
+    nesterov=True)
+# learning policy
+lr_config = dict(policy='step', step=[30, 60, 90])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 90
@@ -0,0 +1,89 @@
+_base_ = '../../base.py'
+# model settings
+model = dict(
+    type='Classification',
+    pretrained=None,
+    frozen_backbone=True,
+    with_sobel=True,
+    backbone=dict(
+        type='ResNet',
+        depth=50,
+        in_channels=2,
+        out_indices=[0, 1, 2, 3, 4],  # 0: conv-1, x: stage-x
+        norm_cfg=dict(type='BN')),
+    head=dict(
+        type='MultiClsHead',
+        pool_type='specified',
+        in_indices=[0, 1, 2, 3, 4],
+        with_last_layer_unpool=True,
+        backbone='resnet50',
+        norm_cfg=dict(type='BN', momentum=0.1, affine=False),
+        num_classes=205))
+# dataset settings
+data_source_cfg = dict(
+    type='Places205',
+    memcached=False,
+    mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/places205/meta/train_labeled.txt'
+data_train_root = 'data/places205/train'
+data_test_list = 'data/places205/meta/val_labeled.txt'
+data_test_root = 'data/places205/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomHorizontalFlip'),
+    dict(
+        type='ColorJitter',
+        brightness=0.4,
+        contrast=0.4,
+        saturation=0.4,
+        hue=0.),
+    dict(type='ToTensor'),
+    dict(type='Lighting'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+    dict(type='Resize', size=256),
+    dict(type='CenterCrop', size=224),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+    imgs_per_gpu=256,  # total 256
+    workers_per_gpu=8,
+    train=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_train_list, root=data_train_root,
+            **data_source_cfg),
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        data_source=dict(
+            list_file=data_test_list, root=data_test_root, **data_source_cfg),
+        pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+    dict(
+        type='ValidateHook',
+        dataset=data['val'],
+        initial=True,
+        interval=10,
+        imgs_per_gpu=128,
+        workers_per_gpu=4,
+        eval_param=dict(topk=(1, )))
+]
+# optimizer
+optimizer = dict(
+    type='SGD',
+    lr=0.01,
+    momentum=0.9,
+    weight_decay=0.0001,
+    paramwise_options=dict(norm_decay_mult=0.),
+    nesterov=True)
+# learning policy
+lr_config = dict(policy='step', step=[30, 60, 90])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 90
@@ -85,4 +85,4 @@ optimizer = dict(
 lr_config = dict(policy='step', step=[400])
 checkpoint_config = dict(interval=10)
 # runtime settings
-total_epochs = 480
+total_epochs = 200
@@ -86,7 +86,7 @@ Arguments:
 1. First, extract backbone weights:
 
 ```shell
-python tools/extract_backbone_weights.py ${CHECKPOINT} --save-path ${WEIGHT_FILE}
+python tools/extract_backbone_weights.py ${CHECKPOINT} ${WEIGHT_FILE}
 ```
 Arguments:
 - `CHECKPOINTS`: the checkpoint file of a selfsup method named as `epoch_*.pth`.
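A concrete form of the new positional command, with illustrative paths only (neither file is part of this commit):

```shell
python tools/extract_backbone_weights.py work_dirs/selfsup/moco/r50_v1/epoch_200.pth \
    pretrains/moco_r50_v1_extracted.pth
```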
@@ -119,7 +119,7 @@ python tools/count_parameters.py ${CONFIG_FILE}
 1. Extract the backbone weights as mentioned before. You don't have to extract it again if you've already done it in the benchmark step.
 
 ```shell
-python tools/extract_backbone_weights.py ${CHECKPOINT} --save-path ${WEIGHT_FILE}
+python tools/extract_backbone_weights.py ${CHECKPOINT} ${WEIGHT_FILE}
 ```
 
 2. Compute the hash of the weight file and append the hash id to the filename.
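A minimal sketch of step 2, assuming the hash id is the first eight hex digits of the file's SHA-256 digest; the exact hashing convention is not spelled out in this diff, and the path is hypothetical:

```shell
WEIGHT_FILE=pretrains/moco_r50_v1_extracted.pth       # hypothetical path
HASH=$(sha256sum $WEIGHT_FILE | cut -c 1-8)           # first 8 hex digits of the digest
mv $WEIGHT_FILE ${WEIGHT_FILE%.pth}-${HASH}.pth       # e.g. ..._extracted-1a2b3c4d.pth
```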
@@ -13,7 +13,7 @@ class ContrastiveDataset(BaseDataset):
         super(ContrastiveDataset, self).__init__(data_source, pipeline)
 
     def __getitem__(self, idx):
-        img, _ = self.data_source.get_sample(idx)
+        img = self.data_source.get_sample(idx)
         img1 = self.pipeline(img)
         img2 = self.pipeline(img)
         img_cat = torch.cat((img1.unsqueeze(0), img2.unsqueeze(0)), dim=0)
@@ -39,5 +39,8 @@ class ImageList(object):
         else:
             img = Image.open(self.fns[idx])
         img = img.convert('RGB')
-        target = self.labels[idx] if self.has_labels else None
-        return img, target
+        if self.has_labels:
+            target = self.labels[idx]
+            return img, target
+        else:
+            return img
@@ -13,7 +13,7 @@ class DeepClusterDataset(BaseDataset):
         self.labels = [-1 for _ in range(self.data_source.get_length())]
 
     def __getitem__(self, idx):
-        img, _ = self.data_source.get_sample(idx)
+        img = self.data_source.get_sample(idx)
         label = self.labels[idx]
         img = self.pipeline(img)
         return dict(img=img, pseudo_label=label, idx=idx)
@@ -11,7 +11,7 @@ class NPIDDataset(BaseDataset):
         super(NPIDDataset, self).__init__(data_source, pipeline)
 
     def __getitem__(self, idx):
-        img, _ = self.data_source.get_sample(idx)
+        img = self.data_source.get_sample(idx)
         img = self.pipeline(img)
         return dict(img=img, idx=idx)
 
@@ -25,7 +25,7 @@ class RotationPredDataset(BaseDataset):
         super(RotationPredDataset, self).__init__(data_source, pipeline)
 
     def __getitem__(self, idx):
-        img, _ = self.data_source.get_sample(idx)
+        img = self.data_source.get_sample(idx)
         img = self.pipeline(img)
         img = torch.stack(rotate(img), dim=0)
         rotation_labels = torch.LongTensor([0, 1, 2, 3])
@@ -1,16 +1,13 @@
 #!/usr/bin/env bash
 set -x

 PYTHON=${PYTHON:-"python"}
 CFG=$1
-CHECKPOINT=$2
-GPUS=${3:-8}
+GPUS=$2
+PY_ARGS=${@:3} # "--checkpoint $CHECKPOINT --pretrained $PRETRAINED"
 PORT=${PORT:-29500}

 WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
-if [ "$CHECKPOINT" == "" ]; then
-    $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
-        tools/extract.py $CFG --layer-ind "0,1,2,3,4" --work_dir $WORK_DIR --launcher pytorch
-else
-    $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
-        tools/extract.py $CFG --layer-ind "0,1,2,3,4" --checkpoint $CHECKPOINT \
-        --work_dir $WORK_DIR --launcher pytorch
-fi
+$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+    tools/extract.py $CFG --layer-ind "0,1,2,3,4" --work_dir $WORK_DIR \
+    --launcher pytorch ${PY_ARGS}
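After this change the extraction launcher takes only the config and the GPU count as positionals; the checkpoint or pretrained file is forwarded through `PY_ARGS`. A hedged usage sketch, assuming this is the `tools/dist_extract.sh` called by the benchmark scripts above (the config and weight paths are illustrative):

```shell
bash tools/dist_extract.sh configs/selfsup/moco/r50_v1.py 8 --checkpoint work_dirs/selfsup/moco/r50_v1/epoch_200.pth
bash tools/dist_extract.sh configs/selfsup/moco/r50_v1.py 8 --pretrained pretrains/moco_r50_v1_extracted.pth
```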
@ -3,8 +3,8 @@ PYTHON=${PYTHON:-"python"}
|
|||
|
||||
CFG=$1
|
||||
GPUS=$2
|
||||
PORT=${PORT:-29500}
|
||||
PY_ARGS=${@:3}
|
||||
PORT=${PORT:-29500}
|
||||
|
||||
WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
|
||||
|
||||
|
|
|
@@ -52,6 +52,9 @@ def parse_args():
         description='OpenSelfSup extract features of a model')
     parser.add_argument('config', help='test config file path')
     parser.add_argument('--checkpoint', default=None, help='checkpoint file')
+    parser.add_argument(
+        '--pretrained', default='random',
+        help='pretrained model file, exclusive to --checkpoint')
     parser.add_argument(
         '--dataset-config',
         default='benchmarks/extract_info/voc07.py',
@@ -89,9 +92,9 @@ def main():
     layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
     cfg.model.backbone.out_indices = layer_ind
 
-    if args.checkpoint is None:
-        assert cfg.model.pretrained is not None, \
-            "Must have pretrain if no checkpoint is given."
+    # checkpoint and pretrained are exclusive
+    assert cfg.model.pretrained == "random" or args.checkpoint is None, \
+        "Checkpoint and pretrained are exclusive."
 
     # check memcached package exists
     if importlib.util.find_spec('mc') is None:
@@ -106,6 +109,8 @@ def main():
         distributed = True
         init_dist(args.launcher, **cfg.dist_params)
 
+    # create work_dir
+    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
     # logger
     timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
     log_file = osp.join(cfg.work_dir, 'extract_{}.log'.format(timestamp))
@@ -124,7 +129,15 @@ def main():
     # build the model and load checkpoint
     model = build_model(cfg.model)
+    if args.checkpoint is not None:
+        logger.info("Use checkpoint: {} to extract features".format(
+            args.checkpoint))
+        load_checkpoint(model, args.checkpoint, map_location='cpu')
+    elif args.pretrained != "random":
+        logger.info('Use pretrained model: {} to extract features'.format(
+            args.pretrained))
+    else:
+        logger.info('No checkpoint or pretrained is given, use random init.')
 
     if not distributed:
         model = MMDataParallel(model, device_ids=[0])
     else:
@@ -7,15 +7,14 @@ def parse_args():
         description='This script extracts backbone weights from a checkpoint')
     parser.add_argument('checkpoint', help='checkpoint file')
     parser.add_argument(
-        '--save-path', type=str, default=None, help='destination file name')
+        'output', type=str, help='destination file name')
     args = parser.parse_args()
     return args
 
 
 def main():
     args = parse_args()
-    if args.save_path is None:
-        args.save_path = args.checkpoint[:-4] + "_extracted.pth"
+    assert args.output.endswith(".pth")
     ck = torch.load(args.checkpoint, map_location=torch.device('cpu'))
     output_dict = dict(state_dict=dict(), author="OpenSelfSup")
     has_backbone = False
@@ -25,7 +24,7 @@ def main():
             has_backbone = True
     if not has_backbone:
         raise Exception("Cannot find a backbone module in the checkpoint.")
-    torch.save(output_dict, args.save_path)
+    torch.save(output_dict, args.output)
 
 
 if __name__ == '__main__':
@@ -1,12 +1,10 @@
 #!/usr/bin/env bash

 set -x

 PARTITION=$1
 CFG=$2
-CHECKPOINT=$3
-GPUS=${4:-8}
-PY_ARGS=${@:5}
+GPUS=$3
+PY_ARGS=${@:4} # "--checkpoint $CHECKPOINT --pretrained $PRETRAINED"
 JOB_NAME="openselfsup"
 GPUS_PER_NODE=${GPUS_PER_NODE:-8}
 CPUS_PER_TASK=${CPUS_PER_TASK:-5}
@@ -23,5 +21,5 @@ srun -p ${PARTITION} \
     --kill-on-bad-exit=1 \
     ${SRUN_ARGS} \
     python -u tools/extract.py $CFG \
-    --layer-ind "0,1,2,3,4" --checkpoint $CHECKPOINT \
-    --work_dir $WORK_DIR --launcher="slurm" ${PY_ARGS}
+    --layer-ind "0,1,2,3,4" --work_dir $WORK_DIR \
+    --launcher="slurm" ${PY_ARGS}