mmclassification/tools/visualizations/browse_dataset.py

# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import sys

import cv2
import mmcv
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.dataset import Compose
from mmengine.visualization import Visualizer

from mmcls.datasets.builder import build_dataset
from mmcls.registry import VISUALIZERS
from mmcls.utils import register_all_modules
from mmcls.visualization import ClsVisualizer
from mmcls.visualization.cls_visualizer import _get_adaptive_scale

def parse_args():
    parser = argparse.ArgumentParser(description='Browse a dataset')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--output-dir',
        default=None,
        type=str,
        help='If there is no display interface, you can save it.')
    parser.add_argument('--not-show', default=False, action='store_true')
    parser.add_argument(
        '--phase',
        default='train',
        type=str,
        choices=['train', 'test', 'val'],
        help='phase of the dataset to visualize, accepts "train", "test" and '
        '"val". Defaults to "train".')
    parser.add_argument(
        '--show-number',
        type=int,
        default=sys.maxsize,
        help='number of images selected to visualize, must be bigger than 0. '
        'If the number is bigger than the length of the dataset, show all '
        'images in the dataset; defaults to "sys.maxsize", i.e. show every '
        'image in the dataset.')
    parser.add_argument(
        '--show-interval',
        type=float,
        default=2,
        help='the interval of show (s)')
    parser.add_argument(
        '--mode',
        default='transformed',
        type=str,
        choices=['original', 'transformed', 'concat', 'pipeline'],
        help='display mode; show the original pictures, the transformed '
        'pictures, or a comparison of both. "original" means show the images '
        'loaded from disk; "transformed" means show the images after the '
        'pipeline transforms; "concat" means show the "original" and '
        '"transformed" images stitched together; "pipeline" means show all '
        'the intermediate images. Defaults to "transformed".')
    parser.add_argument(
        '--rescale-factor',
        type=float,
        help='image rescale factor, which is useful if the output is too '
        'large or too small.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
    args = parser.parse_args()
    return args
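
# A typical invocation, for reference. The config path and flag values below
# are only illustrative; any mmcls config with a matching dataloader works:
#
#   python tools/visualizations/browse_dataset.py \
#       configs/resnet/resnet18_8xb32_in1k.py \
#       --mode concat --show-number 10 --output-dir work_dirs/browse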


def make_grid(imgs, names, rescale_factor=None):
    """Concat a list of pictures into a single big picture, aligning them by
    height."""
    vis = Visualizer()
    ori_shapes = [img.shape[:2] for img in imgs]
    if rescale_factor is not None:
        imgs = [mmcv.imrescale(img, rescale_factor) for img in imgs]

    max_height = int(max(img.shape[0] for img in imgs) * 1.1)
    min_width = min(img.shape[1] for img in imgs)
    horizontal_gap = min_width // 10
    img_scale = _get_adaptive_scale((max_height, min_width))

    texts = []
    text_positions = []
    start_x = 0
    for i, img in enumerate(imgs):
        pad_height = (max_height - img.shape[0]) // 2
        pad_width = horizontal_gap // 2
        # pad every image to the same height (plus room for the caption) with
        # a white border, so they can be concatenated horizontally
        imgs[i] = cv2.copyMakeBorder(
            img,
            pad_height,
            max_height - img.shape[0] - pad_height + int(img_scale * 30 * 2),
            pad_width,
            pad_width,
            cv2.BORDER_CONSTANT,
            value=(255, 255, 255))
        texts.append(f'{names[i]}\n{ori_shapes[i]}')
        text_positions.append(
            [start_x + img.shape[1] // 2 + pad_width, max_height])
        start_x += img.shape[1] + horizontal_gap

    display_img = np.concatenate(imgs, axis=1)
    vis.set_image(display_img)
    img_scale = _get_adaptive_scale(display_img.shape[:2])
    # draw each image's name and original shape beneath the stitched picture
    vis.draw_texts(
        texts,
        positions=np.array(text_positions),
        font_sizes=img_scale * 7,
        colors='black',
        horizontal_alignments='center',
        font_families='monospace')
    return vis.get_image()
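
# make_grid can also be used on its own, e.g. (variable names are only
# illustrative; img_a and img_b are HxWx3 uint8 arrays, possibly of
# different sizes):
#
#   grid = make_grid([img_a, img_b], ['original', 'transformed'],
#                    rescale_factor=0.5)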


class InspectCompose(Compose):
    """Compose multiple transforms sequentially.

    Also record the ``img`` field of every intermediate result in one list.
    """

    def __init__(self, transforms, intermediate_imgs):
        super().__init__(transforms=transforms)
        self.intermediate_imgs = intermediate_imgs

    def __call__(self, data):
        if 'img' in data:
            self.intermediate_imgs.append({
                'name': 'original',
                'img': data['img'].copy()
            })
        for t in self.transforms:
            data = t(data)
            if data is None:
                return None
            if 'img' in data:
                self.intermediate_imgs.append({
                    'name': t.__class__.__name__,
                    'img': data['img'].copy()
                })
        return data
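
# After one pipeline call, intermediate_imgs holds the original image plus one
# entry per transform whose output still contains an 'img' field, e.g. (the
# transform names below are only illustrative and depend on the config):
#
#   [{'name': 'original', 'img': ...},
#    {'name': 'RandomResizedCrop', 'img': ...},
#    {'name': 'RandomFlip', 'img': ...}]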


def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # register all modules in mmcls into the registries
    register_all_modules()

    dataset_cfg = cfg.get(args.phase + '_dataloader').get('dataset')
    dataset = build_dataset(dataset_cfg)

    # wrap the pipeline so every intermediate 'img' is recorded
    intermediate_imgs = []
    dataset.pipeline = InspectCompose(dataset.pipeline.transforms,
                                      intermediate_imgs)

    # init visualizer
    visualizer: ClsVisualizer = VISUALIZERS.build(cfg.visualizer)
    visualizer.dataset_meta = dataset.metainfo

    # init visualization image number
    display_number = min(args.show_number, len(dataset))
    progress_bar = mmcv.ProgressBar(display_number)

    for i, item in zip(range(display_number), dataset):
        rescale_factor = args.rescale_factor
        if args.mode == 'original':
            image = intermediate_imgs[0]['img']
        elif args.mode == 'transformed':
            image = intermediate_imgs[-1]['img']
        elif args.mode == 'concat':
            ori_image = intermediate_imgs[0]['img']
            trans_image = intermediate_imgs[-1]['img']
            image = make_grid([ori_image, trans_image],
                              ['original', 'transformed'], rescale_factor)
            rescale_factor = None
        else:
            image = make_grid([result['img'] for result in intermediate_imgs],
                              [result['name'] for result in intermediate_imgs],
                              rescale_factor)
            rescale_factor = None
        intermediate_imgs.clear()

        data_sample = item['data_sample'].numpy()

        # get the filename from the dataset, or just use the index as filename
        if hasattr(item['data_sample'], 'img_path'):
            filename = osp.basename(item['data_sample'].img_path)
        else:
            # some datasets do not provide image paths
            filename = f'{i}.jpg'

        out_file = osp.join(args.output_dir,
                            filename) if args.output_dir is not None else None

        visualizer.add_datasample(
            filename,
            image[..., ::-1],  # reverse the channel order (BGR -> RGB)
            data_sample,
            rescale_factor=rescale_factor,
            show=not args.not_show,
            wait_time=args.show_interval,
            out_file=out_file)
        progress_bar.update()


if __name__ == '__main__':
    main()