update format
parent cfee84c4ef
commit 77f46bc1ef
@@ -1,12 +1,10 @@
-Global:
-  Engine: POPEngine
-  infer_imgs: "../../images/wangzai.jpg"
-
-AlgoModule:
-  - Module:
-      preprocess:
-        name: ImageProcessor
-        processors:
+Modules:
+  - name: Detector
+    type: AlgoMod
+    processors:
+      - name: ImageProcessor
+        type: preprocessor
+        ops:
           - ResizeImage:
               size: [640, 640]
               interpolation: 2
@@ -17,14 +15,16 @@ AlgoModule:
               order: hwc
           - ToCHWImage:
           - GetShapeInfo:
-              order: chw
+              configs:
+                order: chw
           - ToBatch:
-      predictor:
+      - name: PaddlePredictor
+        type: predictor
         inference_model_dir: ./models/ppyolov2_r50vd_dcn_mainbody_v1.0_infer/
         input_names:
         output_names:
-      postprocess:
-        name: DetPostProcessor
+      - name: PPYOLOv2PostPro
+        type: postprocessor
         threshold: 0.2
         max_det_results: 1
         label_list:
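
Taken together, the two config hunks above replace the old Global/AlgoModule layout with a single Modules list. A minimal sketch of the same structure as a Python dict, assuming only the values visible in the diff (ops outside the hunks and the label_list entries are elided, so they stay empty or omitted here):

    # Hypothetical in-memory equivalent of the new YAML; POPEngine iterates config["Modules"].
    config = {
        "Modules": [{
            "name": "Detector",
            "type": "AlgoMod",
            "processors": [
                {"name": "ImageProcessor", "type": "preprocessor", "ops": [
                    {"ResizeImage": {"size": [640, 640], "interpolation": 2}},
                    {"ToCHWImage": None},
                    {"GetShapeInfo": {"configs": {"order": "chw"}}},
                    {"ToBatch": None},
                ]},
                {"name": "PaddlePredictor", "type": "predictor",
                 "inference_model_dir": "./models/ppyolov2_r50vd_dcn_mainbody_v1.0_infer/",
                 "input_names": None, "output_names": None},
                {"name": "PPYOLOv2PostPro", "type": "postprocessor",
                 "threshold": 0.2, "max_det_results": 1, "label_list": []},  # label_list entries elided in the diff
            ],
        }],
    }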
@@ -1,16 +1,18 @@
+import importlib
+
 from processor.algo_mod import AlgoMod


 class POPEngine:
     def __init__(self, config):
         self.algo_list = []
-        # last_algo_type = "start"
-        for algo_config in config["AlgoModule"]:
-            # algo_config["last_algo_type"] = last_algo_type
-            self.algo_list.append(AlgoMod(algo_config["Module"]))
-            # last_algo_type = algo_config["type"]
+        current_mod = importlib.import_module(__name__)
+        for mod_config in config["Modules"]:
+            mod_type = mod_config.get("type")
+            mod = getattr(current_mod, mod_type)(mod_config)
+            self.algo_list.append(mod)

-    def process(self, x):
+    def process(self, input_data):
         for algo_module in self.algo_list:
-            x = algo_module.process(x)
-        return x
+            input_data = algo_module.process(input_data)
+        return input_data
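
POPEngine now resolves each module class from the "type" string through importlib/getattr instead of hard-coding AlgoMod. A standalone sketch of that lookup pattern, with a placeholder class standing in for the real module types:

    import importlib


    class AlgoMod:
        """Placeholder for a real module class resolvable by name."""
        def __init__(self, config):
            self.name = config.get("name")


    def build_module(mod_config):
        # Look the class up by name on this very module, then hand it its own config section.
        current_mod = importlib.import_module(__name__)
        return getattr(current_mod, mod_config.get("type"))(mod_config)


    detector = build_module({"name": "Detector", "type": "AlgoMod"})

An unknown "type" surfaces as an AttributeError from getattr, so a config typo fails at engine construction rather than at inference time.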
@@ -7,7 +7,6 @@ import cv2

 from engine import build_engine
 from utils import config
-from utils.get_image_list import get_image_list


 def main():
@@ -16,13 +15,11 @@ def main():
         args.config, overrides=args.override, show=False)
     config_dict.profiler_options = args.profiler_options
     engine = build_engine(config_dict)
-
-    image_list = get_image_list(config_dict["Global"]["infer_imgs"])
-    for idx, image_file in enumerate(image_list):
-        img = cv2.imread(image_file)[:, :, ::-1]
-        input_data = {"input_image": img}
-        output = engine.process(input_data)
-        print(output)
+    image_file = "../../images/wangzai.jpg"
+    img = cv2.imread(image_file)[:, :, ::-1]
+    input_data = {"input_image": img}
+    output = engine.process(input_data)
+    print(output)


 if __name__ == '__main__':
@@ -1,20 +1 @@
-from abc import ABC, abstractmethod
-
-from processor.algo_mod import searcher
-from processor.algo_mod.predictors import build_predictor
-
-# def build_processor(config):
-#     print(config)
-#     processor_type = config.get("processor_type")
-#     processor_mod = locals()[processor_type]
-#     processor_name = config.get("processor_name")
-#     return getattr(processor_mod, processor_name)
-
-# class BaseProcessor(ABC):
-#     @abstractmethod
-#     def __init__(self, config):
-#         pass
-
-#     @abstractmethod
-#     def process(self, input_data):
-#         pass
+from .algo_mod import AlgoMod
@@ -1,23 +1,26 @@
-from processor.algo_mod.data_processor import ImageProcessor
-from processor.algo_mod.post_processor.det import DetPostProcessor
-from processor.algo_mod.predictors import build_predictor
+from .postprocessor import build_postprocessor
+from .preprocessor import build_preprocessor
+from .predictor import build_predictor

+from ..base_processor import BaseProcessor
+

-def build_processor(config):
-    # processor_type = config.get("processor_type")
-    # processor_mod = locals()[processor_type]
-    processor_name = config.get("name")
-    return eval(processor_name)(config)
-
-
-class AlgoMod(object):
+class AlgoMod(BaseProcessor):
     def __init__(self, config):
-        self.pre_processor = build_processor(config["preprocess"])
-        self.predictor = build_predictor(config["predictor"])
-        self.post_processor = build_processor(config["postprocess"])
+        self.processors = []
+        for processor_config in config["processors"]:
+            processor_type = processor_config.get("type")
+            if processor_type == "preprocessor":
+                processor = build_preprocessor(processor_config)
+            elif processor_type == "predictor":
+                processor = build_predictor(processor_config)
+            elif processor_type == "postprocessor":
+                processor = build_postprocessor(processor_config)
+            else:
+                raise NotImplemented("processor type {} unknown.".format(processor_type))
+            self.processors.append(processor)

     def process(self, input_data):
-        input_data = self.pre_processor.process(input_data)
-        input_data = self.predictor.process(input_data)
-        input_data = self.post_processor.process(input_data)
+        for processor in self.processors:
+            input_data = processor.process(input_data)
         return input_data
@@ -1,2 +0,0 @@
-from processor.algo_mod.data_processor.image_processor import ImageProcessor
-from processor.algo_mod.data_processor.bbox_cropper import BBoxCropper
@@ -1,9 +0,0 @@
-from processor.algo_mod.data_processor.image_processor import BaseProcessor
-
-
-class BBoxCropper(BaseProcessor):
-    def __init__(self, config):
-        pass
-
-    def process(self, input_data):
-        pass
@@ -0,0 +1,9 @@
+import importlib
+
+from .det import PPYOLOv2PostPro
+
+
+def build_postprocessor(config):
+    processor_mod = importlib.import_module(__name__)
+    processor_name = config.get("name")
+    return getattr(processor_mod, processor_name)(config)
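
build_postprocessor, and the matching build_predictor and build_preprocessor factories added below, all share this importlib/getattr shape: the "name" field in the config must match a class imported into the factory's package. A hedged usage sketch, assuming it runs where build_postprocessor is importable and using the values shown in the config hunk (label_list left empty because its entries are elided in the diff):

    post_config = {
        "name": "PPYOLOv2PostPro",
        "type": "postprocessor",
        "threshold": 0.2,
        "max_det_results": 1,
        "label_list": [],
    }
    postprocessor = build_postprocessor(post_config)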
@@ -1,19 +1,21 @@
 from functools import reduce

 import numpy as np

+from utils import logger
+from ...base_processor import BaseProcessor

-class DetPostProcessor(object):
+class PPYOLOv2PostPro(BaseProcessor):
     def __init__(self, config):
-        super().__init__()
         self.threshold = config["threshold"]
         self.label_list = config["label_list"]
         self.max_det_results = config["max_det_results"]

-    def process(self, pred):
-        np_boxes = pred["save_infer_model/scale_0.tmp_1"]
+    def process(self, input_data):
+        pred = input_data["pred"]
+        np_boxes = pred[list(pred.keys())[0]]
         if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
-            print('[WARNNING] No object detected.')
+            logger.warning('[Detector] No object detected.')
             np_boxes = np.array([])

         keep_indexes = np_boxes[:, 1].argsort()[::-1][:self.max_det_results]
@@ -0,0 +1,10 @@
+import importlib
+
+from processor.algo_mod.predictor.paddle_predictor import PaddlePredictor
+from processor.algo_mod.predictor.onnx_predictor import ONNXPredictor
+
+
+def build_predictor(config):
+    processor_mod = importlib.import_module(__name__)
+    processor_name = config.get("name")
+    return getattr(processor_mod, processor_name)(config)
@@ -0,0 +1,8 @@
+from ...base_processor import BaseProcessor
+
+
+class ONNXPredictor(BaseProcessor):
+    def __init__(self, config):
+        pass
+    def process(self, input_data):
+        raise NotImplemented("ONNXPredictor Not supported yet")
@@ -1,14 +1,13 @@
 import os
 import platform

 from paddle.inference import create_predictor
 from paddle.inference import Config as PaddleConfig

+from ...base_processor import BaseProcessor

-class Predictor(object):
+class PaddlePredictor(BaseProcessor):
     def __init__(self, config):
-        super().__init__()
-        # HALF precission predict only work when using tensorrt
         if config.get("use_fp16", False):
             assert config.get("use_tensorrt", False) is True

@@ -61,5 +60,5 @@ class Predictor(object):
         for output_name in output_names:
             output = self.predictor.get_output_handle(output_name)
             output_data[output_name] = output.copy_to_cpu()
-
-        return output_data
+        input_data["pred"] = output_data
+        return input_data
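
The predictor no longer returns a bare output dict; it threads the shared input_data dict through and parks the raw output tensors under "pred", which PPYOLOv2PostPro then consumes. A rough sketch of the dict between stages, with illustrative shapes and a hypothetical tensor name (only the "input_image", "image", "input_shape" and "pred" keys appear in the hunks; anything else is omitted):

    import numpy as np

    # After preprocessing: decoded image plus the batched CHW tensor and shape info.
    input_data = {
        "input_image": np.zeros((640, 640, 3), dtype=np.float32),
        "image": np.zeros((1, 3, 640, 640), dtype=np.float32),
        "input_shape": np.array([640.0, 640.0], dtype=np.float32),
    }

    # After PaddlePredictor.process(): raw outputs keyed by output tensor name (name is made up here).
    input_data["pred"] = {"some_output_name": np.zeros((100, 6), dtype=np.float32)}

    # PPYOLOv2PostPro.process() then picks the first output tensor:
    np_boxes = input_data["pred"][list(input_data["pred"].keys())[0]]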
@@ -1,11 +0,0 @@
-from processor.algo_mod.predictors.paddle_predictor import Predictor as paddle_predictor
-from processor.algo_mod.predictors.onnx_predictor import Predictor as onnx_predictor
-
-
-def build_predictor(config):
-    # if use paddle backend
-    if True:
-        return paddle_predictor(config)
-    # if use onnx backend
-    else:
-        return onnx_predictor(config)
@@ -1,3 +0,0 @@
-class Predictor(object):
-    def __init__(self, config):
-        super().__init__()
@@ -0,0 +1,9 @@
+import importlib
+
+from processor.algo_mod.preprocessor.image_processor import ImageProcessor
+
+
+def build_preprocessor(config):
+    processor_mod = importlib.import_module(__name__)
+    processor_name = config.get("name")
+    return getattr(processor_mod, processor_name)(config)
@@ -3,32 +3,20 @@ import cv2
 import numpy as np
 import importlib
 from PIL import Image
-import paddle

 from utils import logger
-# from processor import BaseProcessor
-
-from abc import ABC, abstractmethod
+from processor.base_processor import BaseProcessor


-class BaseProcessor(ABC):
-    @abstractmethod
-    def __init__(self, *args, **kwargs):
-        pass
-
-    @abstractmethod
-    def process(self, input_data):
-        pass
-
-
-class ImageProcessor(object):
+class ImageProcessor(BaseProcessor):
     def __init__(self, config):
         self.processors = []
-        for processor_config in config.get("processors"):
+        mod = importlib.import_module(__name__)
+        for processor_config in config.get("ops"):
             name = list(processor_config)[0]
             param = {} if processor_config[name] is None else processor_config[
                 name]
-            op = eval(name)(**param)
+            op = getattr(mod, name)(**param)
             self.processors.append(op)

     def process(self, input_data):
@@ -39,13 +27,13 @@ class ImageProcessor(object):
                 input_data = processor.process(input_data)
             else:
                 image = processor(image)
+        input_data["image"] = image
         return input_data


 class GetShapeInfo(BaseProcessor):
-    def __init__(self, order="hwc"):
-        super().__init__()
-        self.order = order
+    def __init__(self, configs):
+        self.order = configs.get("order")

     def process(self, input_data):
         input_image = input_data["input_image"]
@@ -69,43 +57,22 @@ class GetShapeInfo(BaseProcessor):
             ],
             dtype=np.float32)
         input_data['input_shape'] = np.array(image.shape[:2], dtype=np.float32)
-        print(image.shape[0])
         return input_data


-# class ToTensor(BaseProcessor):
-#     def __init__(self):
-#         super().__init__()
-
-#     def process(self, input_data):
-#         image = input_data["image"]
-#         input_data["input_tensor"] = paddle.to_tensor(image)
-#         return input_data
-
-
-class ToBatch(BaseProcessor):
-    def __init__(self):
-        super().__init__()
-
-    def process(self, input_data):
-        image = input_data["image"]
-        input_data["image"] = image[np.newaxis, :, :, :]
-        return input_data
+class ToBatch:
+    def __call__(self, img):
+        img = img[np.newaxis, :, :, :]
+        return img


 class ToRGB:
-    def __init__(self):
-        pass
-
     def __call__(self, img):
         img = img[:, :, ::-1]
         return img


 class ToCHWImage:
-    def __init__(self):
-        pass
-
     def __call__(self, img, img_info=None):
         img = img.transpose((2, 0, 1))
         return img
@@ -0,0 +1,11 @@
+from abc import ABC, abstractmethod
+
+
+class BaseProcessor(ABC):
+    @abstractmethod
+    def __init__(self, config):
+        pass
+
+    @abstractmethod
+    def process(self, input_data):
+        pass