## Motivation

Support inference for ultra-large-scale remote sensing images.

## Modification

Add `rs_image_inference.py` in `demo`.

## Use cases

Taking inference on Vaihingen dataset images with PSPNet as an example, the following settings are required:

- **img**: path of the input image.
- **model**: config file of the model.
- **checkpoint**: weight file of the model.
- **out**: output path for the results.
- **batch_size**: batch size used during inference.
- **win_size**: width and height of the sliding window (e.g., 512x512).
- **stride**: stride of the sliding window (e.g., 400x400); a sketch of this tiling scheme is given below the example command.
- **thread** (default: 1): number of threads used for inference.
- **device** (default: cuda:0): inference device (e.g., `cuda:0` for GPU or `cpu` for CPU).

```shell
python demo/rs_image_inference.py demo/demo.png projects/pp_mobileseg/configs/pp_mobileseg/pp_mobileseg_mobilenetv3_2x16_80k_ade20k_512x512_tiny.py pp_mobileseg_mobilenetv3_2xb16_3rdparty-tiny_512x512-ade20k-a351ebf5.pth --batch-size 8 --device cpu --thread 2
```

---------

Co-authored-by: xiexinch <xiexinch@outlook.com>
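For intuition, here is a minimal sketch of the overlapping tiling implied by the `win_size` (512x512) and `stride` (400x400) settings above. `sliding_windows` is a hypothetical helper for illustration, not code from `rs_image_inference.py`:

```python
# Hypothetical helper (not part of rs_image_inference.py): tile a large
# image with 512x512 windows at a 400x400 stride, so consecutive windows
# overlap by 112 pixels; the last window per row/column is clamped so the
# whole image is covered.
def sliding_windows(height, width, win=(512, 512), stride=(400, 400)):
    """Yield (top, left, bottom, right) boxes covering the full image."""
    win_h, win_w = win
    stride_h, stride_w = stride
    last_top = max(height - win_h, 0)
    last_left = max(width - win_w, 0)
    for top in range(0, last_top + stride_h, stride_h):
        top = min(top, last_top)  # clamp the final row of windows
        for left in range(0, last_left + stride_w, stride_w):
            left = min(left, last_left)  # clamp the final column
            yield (top, left,
                   min(top + win_h, height), min(left + win_w, width))


boxes = list(sliding_windows(6000, 6000))  # e.g. one large remote sensing tile
```

The overlap between adjacent windows lets per-window predictions be blended (for example by averaging logits) in the shared region, which suppresses seam artifacts at tile borders.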
```python
# Copyright (c) OpenMMLab. All rights reserved.
from collections import defaultdict
from typing import Sequence, Union

import numpy as np
from mmengine.dataset import Compose
from mmengine.model import BaseModel

# Accepted inputs: a single image (path or array) or a sequence of either.
ImageType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]]


def _preprare_data(imgs: ImageType, model: BaseModel):
    cfg = model.cfg
    # Inference has no ground truth, so drop any LoadAnnotations transform
    # from the test pipeline.
    for t in cfg.test_pipeline:
        if t.get('type') == 'LoadAnnotations':
            cfg.test_pipeline.remove(t)

    # Normalize the input to a list, remembering whether the caller passed
    # a batch so the result can be unwrapped accordingly.
    is_batch = True
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]
        is_batch = False

    # In-memory arrays need a different loader than file paths.
    if isinstance(imgs[0], np.ndarray):
        cfg.test_pipeline[0]['type'] = 'LoadImageFromNDArray'

    # TODO: Consider using the singleton pattern to avoid building
    # a pipeline for each inference
    pipeline = Compose(cfg.test_pipeline)

    data = defaultdict(list)
    for img in imgs:
        if isinstance(img, np.ndarray):
            data_ = dict(img=img)
        else:
            data_ = dict(img_path=img)
        data_ = pipeline(data_)
        data['inputs'].append(data_['inputs'])
        data['data_samples'].append(data_['data_samples'])

    return data, is_batch
```
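For context, here is a minimal usage sketch of how this helper is typically consumed. The config and checkpoint paths are placeholders, and the `test_step` call follows the standard MMEngine inference pattern:

```python
# Hypothetical usage; replace the config/checkpoint paths with real files.
import torch
from mmseg.apis import init_model

model = init_model('path/to/config.py', 'path/to/checkpoint.pth',
                   device='cuda:0')
data, is_batch = _preprare_data('demo/demo.png', model)
with torch.no_grad():
    results = model.test_step(data)  # one SegDataSample per input image
print(results if is_batch else results[0])
```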