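# SRCNN restoration config carried in the open-mmlab/mmdeploy repository.
# Judging from the dataset paths below (tests/test_codebase/test_mmedit/data),
# this appears to be a lightweight fixture for mmdeploy's mmedit codebase unit
# tests, apparently adapted from mmediting's srcnn_x4k915_g1_1000k_div2k
# config with `scale` reduced to 1.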
exp_name = 'srcnn_x4k915_g1_1000k_div2k'

scale = 1
# model settings
model = dict(
    type='BasicRestorer',
    generator=dict(
        type='SRCNN',
        channels=(3, 64, 32, 3),
        kernel_sizes=(9, 1, 5),
        upscale_factor=scale),
    pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
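# The generator above is the classic three-layer SRCNN (9-1-5 conv kernels,
# channel widths 3 -> 64 -> 32 -> 3); BasicRestorer trains it against the
# ground-truth image with a plain L1 pixel loss. upscale_factor follows the
# `scale` variable defined at the top of the file.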
# model training and testing settings
train_cfg = None
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=scale)
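# PSNR and SSIM are computed after cropping `scale` pixels from each image
# border, the usual evaluation protocol for super-resolution models.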

# dataset settings
train_dataset_type = 'SRAnnotationDataset'
val_dataset_type = 'SRFolderDataset'
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key='lq',
        flag='unchanged'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key='gt',
        flag='unchanged'),
    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
    dict(
        type='Normalize',
        keys=['lq', 'gt'],
        mean=[0, 0, 0],
        std=[1, 1, 1],
        to_rgb=True),
    dict(type='PairedRandomCrop', gt_patch_size=128),
    dict(
        type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
        direction='horizontal'),
    dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
    dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']),
    dict(type='ImageToTensor', keys=['lq', 'gt'])
]
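# Training pipeline: lq/gt pairs are loaded unchanged from disk, rescaled to
# [0, 1] (the Normalize step with zero mean and unit std is effectively a
# no-op), randomly cropped to paired 128-pixel GT patches, and augmented with
# random horizontal/vertical flips and an HW transpose before being collected
# as tensors.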
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key='lq',
        flag='unchanged'),
    dict(
        type='LoadImageFromFile',
        io_backend='disk',
        key='gt',
        flag='unchanged'),
    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
    dict(
        type='Normalize',
        keys=['lq', 'gt'],
        mean=[0, 0, 0],
        std=[1, 1, 1],
        to_rgb=True),
    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'lq_path']),
    dict(type='ImageToTensor', keys=['lq', 'gt'])
]
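# The test pipeline mirrors the training one but drops cropping and
# augmentation, so full-size images are restored and evaluated.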

data = dict(
    workers_per_gpu=8,
    train_dataloader=dict(samples_per_gpu=16, drop_last=True),
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    test=dict(
        type=val_dataset_type,
        lq_folder='tests/test_codebase/test_mmedit/data/imgs',
        gt_folder='tests/test_codebase/test_mmedit/data/imgs',
        pipeline=test_pipeline,
        scale=scale,
        filename_tmpl='{}'))
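# Only a `test` split is configured, and both lq_folder and gt_folder point at
# the bundled test images, which is consistent with this config serving as a
# unit-test fixture rather than a full training recipe.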

# optimizer
optimizers = dict(generator=dict(type='Adam', lr=2e-4, betas=(0.9, 0.999)))

# learning policy
total_iters = 1000000
lr_config = dict(
    policy='CosineRestart',
    by_epoch=False,
    periods=[250000, 250000, 250000, 250000],
    restart_weights=[1, 1, 1, 1],
    min_lr=1e-7)
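# Learning-rate schedule: cosine annealing with warm restarts over four
# 250k-iteration periods (matching total_iters = 1,000,000); each restart
# begins again at the base lr of 2e-4 (restart_weights all 1) and decays to a
# floor of 1e-7.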

checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
evaluation = dict(interval=5000, save_image=True, gpu_collect=True)
log_config = dict(
    interval=100,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        dict(type='TensorboardLoggerHook'),
    ])
visual_config = None
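# Checkpoints and PSNR/SSIM evaluation run every 5000 iterations; training
# progress is logged every 100 iterations to stdout and TensorBoard.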

# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
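
# ---------------------------------------------------------------------------
# Usage sketch (kept commented out so it is not executed when mmcv parses this
# config). Assuming an mmediting 0.x environment, the restorer described above
# could be built roughly like this; the file name 'srcnn_config.py' is only a
# placeholder for wherever this config is saved:
#
#   from mmcv import Config
#   from mmedit.models import build_model
#
#   cfg = Config.fromfile('srcnn_config.py')
#   model = build_model(
#       cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# ---------------------------------------------------------------------------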