Support Single Image Inference

pull/5/head
mayorx 2022-04-16 18:32:43 +08:00
parent 08a0c45b16
commit 1356b09f9b
8 changed files with 42 additions and 9 deletions

basicsr/demo.py

@@ -9,7 +9,7 @@ import torch
 # from basicsr.data import create_dataloader, create_dataset
 from basicsr.models import create_model
 from basicsr.train import parse_options
-from basicsr.utils import FileClient, imfrombytes, img2tensor, padding
+from basicsr.utils import FileClient, imfrombytes, img2tensor, padding, tensor2img, imwrite
 # from basicsr.utils import (get_env_info, get_root_logger, get_time_str,
 #                            make_exp_dirs)
@@ -37,10 +37,24 @@ def main():
     ## 2. run inference
     opt['dist'] = False
     model = create_model(opt)
-    model.single_image_inference(img, output_path)
-    print('inference {} .. finished.'.format(img_path))
+    model.feed_data(data={'lq': img.unsqueeze(dim=0)})
+
+    if model.opt['val'].get('grids', False):
+        model.grids()
+
+    model.test()
+
+    if model.opt['val'].get('grids', False):
+        model.grids_inverse()
+
+    visuals = model.get_current_visuals()
+    sr_img = tensor2img([visuals['result']])
+    imwrite(sr_img, output_path)
+
+    print(f'inference {img_path} .. finished. saved to {output_path}')
 
 if __name__ == '__main__':
     main()
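
For orientation, here is a minimal end-to-end sketch of `basicsr/demo.py` as it stands after this commit. The image-loading step (section 1) is reconstructed from the imports above and is not part of this hunk, so treat it as an assumption rather than the committed code:

```python
# Sketch of basicsr/demo.py after this commit; section 1 is reconstructed
# from the imports above, not copied from the diff.
from basicsr.models import create_model
from basicsr.train import parse_options
from basicsr.utils import (FileClient, imfrombytes, img2tensor, tensor2img,
                           imwrite)


def main():
    # parse_options() now also fills opt['img_path'] when --input_path and
    # --output_path are both given (see the basicsr/train.py hunk below).
    opt = parse_options(is_train=False)
    img_path = opt['img_path']['input_img']
    output_path = opt['img_path']['output_img']

    ## 1. read the degraded image as a CHW float tensor (assumed read path)
    file_client = FileClient('disk')
    img_bytes = file_client.get(img_path)
    img = imfrombytes(img_bytes, float32=True)
    img = img2tensor(img, bgr2rgb=True, float32=True)

    ## 2. run inference (as added in this commit)
    opt['dist'] = False
    model = create_model(opt)

    model.feed_data(data={'lq': img.unsqueeze(dim=0)})  # add batch dimension

    if model.opt['val'].get('grids', False):
        model.grids()          # split a large input into overlapping crops

    model.test()

    if model.opt['val'].get('grids', False):
        model.grids_inverse()  # stitch the crops back together

    visuals = model.get_current_visuals()
    sr_img = tensor2img([visuals['result']])
    imwrite(sr_img, output_path)

    print(f'inference {img_path} .. finished. saved to {output_path}')


if __name__ == '__main__':
    main()
```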

basicsr/train.py

@@ -35,6 +35,10 @@ def parse_options(is_train=True):
         default='none',
         help='job launcher')
     parser.add_argument('--local_rank', type=int, default=0)
+    parser.add_argument('--input_path', type=str, required=False,
+                        help='The path to the input image. For single image inference only.')
+    parser.add_argument('--output_path', type=str, required=False,
+                        help='The path to the output image. For single image inference only.')
     args = parser.parse_args()
     opt = parse(args.opt, is_train=is_train)

@@ -59,6 +63,12 @@ def parse_options(is_train=True):
     opt['manual_seed'] = seed
     set_random_seed(seed + opt['rank'])
 
+    if args.input_path is not None and args.output_path is not None:
+        opt['img_path'] = {
+            'input_img': args.input_path,
+            'output_img': args.output_path
+        }
+
     return opt
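
As a brief usage sketch, downstream code (such as `demo.py`) can read the new option like this; the `None` check mirrors the guard added above:

```python
# Reading the paths that parse_options() now records (sketch).
from basicsr.train import parse_options

opt = parse_options(is_train=False)

paths = opt.get('img_path')  # present only if both CLI flags were passed
if paths is not None:
    input_img = paths['input_img']    # value of --input_path
    output_img = paths['output_img']  # value of --output_path
```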

BIN demo/blurry.png 100644 (added, 1.0 MiB; binary file not shown)

BIN demo/deblur_img.png 100644 (added, 1.2 MiB; binary file not shown)

BIN demo/denoise_img.png 100644 (added, 61 KiB; binary file not shown)

BIN demo/noisy.png 100644 (added, 167 KiB; binary file not shown)

experiments/pretrained_models/README.md

@@ -1,5 +1,4 @@
 ### Pretrained NAFNet Models
 ---
-* [NAFNet-SIDD-width64](https://drive.google.com/file/d/14Fht1QQJ2gMlk4N1ERCRuElg8JfjrWWR/view?usp=sharing)
-* [NAFNet-GoPro-width64](https://drive.google.com/file/d/1S0PVRbyTakYY9a82kujgZLbMihfNBLfC/view?usp=sharing)
+please refer to https://github.com/megvii-research/NAFNet/#results-and-pre-trained-models, and download the pretrained models into ./experiments/pretrained_models

README.md

@@ -35,8 +35,18 @@ python setup.py develop --no_cuda_ext
 ### Quick Start
 * Image Denoise Colab Demo: [<a href="https://colab.research.google.com/drive/1dkO5AyktmBoWwxBwoKFUurIDn0m4qDXT?usp=sharing"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="google colab logo"></a>](https://colab.research.google.com/drive/1dkO5AyktmBoWwxBwoKFUurIDn0m4qDXT?usp=sharing)
 * Image Deblur Colab Demo: [<a href="https://colab.research.google.com/drive/1yR2ClVuMefisH12d_srXMhHnHwwA1YmU?usp=sharing"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="google colab logo"></a>](https://colab.research.google.com/drive/1yR2ClVuMefisH12d_srXMhHnHwwA1YmU?usp=sharing)
+* Single Image Inference Demo:
+    * Image Denoise:
+    ```
+    python basicsr/demo.py -opt options/test/SIDD/NAFNet-width64.yml --input_path ./demo/noisy.png --output_path ./demo/denoise_img.png
+    ```
+    * Image Deblur:
+    ```
+    python basicsr/demo.py -opt options/test/GoPro/NAFNet-width64.yml --input_path ./demo/blurry.png --output_path ./demo/deblur_img.png
+    ```
+    * ```--input_path```: the path of the degraded image
+    * ```--output_path```: the path to save the predicted image
+    * [pretrained models](https://github.com/megvii-research/NAFNet/#results-and-pre-trained-models) should be downloaded.
 
 ### Results and Pre-trained Models
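
For scripted use, the denoise command above can also be launched from Python; this subprocess wrapper is illustrative and not part of the repository:

```python
# Illustrative wrapper around the README's denoise command (not in the repo).
import subprocess

subprocess.run(
    [
        'python', 'basicsr/demo.py',
        '-opt', 'options/test/SIDD/NAFNet-width64.yml',
        '--input_path', './demo/noisy.png',
        '--output_path', './demo/denoise_img.png',
    ],
    check=True,  # raise CalledProcessError if demo.py exits non-zero
)
```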
@@ -78,8 +88,8 @@ If you have any questions, please contact chenliangyu@megvii.com or chuxiaojie@m
 <details>
 <summary>statistics</summary>
 
 ![visitors](https://visitor-badge.glitch.me/badge?page_id=megvii-research/NAFNet)
 
 </details>