Mirror of https://github.com/PaddlePaddle/PaddleClas.git, synced 2025-06-03 21:55:06 +08:00
Commit cfee84c4ef (parent 9b4b7a67b7): refactor
```diff
@@ -1,13 +1,31 @@
-- preprocess:
-    - processor_type: data_processor
-      processor_name: image_processor
-      image_processors:
-        - ResizeImage:
-            size: [640, 640]
-            interpolation: 2
-        - NormalizeImage:
-            scale: 0.00392157
-            mean: [0.485, 0.456, 0.406]
-            std: [0.229, 0.224, 0.225]
-        - ToRGB
+Global:
+  Engine: POPEngine
+  infer_imgs: "../../images/wangzai.jpg"
+
+AlgoModule:
+  - Module:
+      preprocess:
+        name: ImageProcessor
+        processors:
+          - ResizeImage:
+              size: [640, 640]
+              interpolation: 2
+          - NormalizeImage:
+              scale: 0.00392157
+              mean: [0.485, 0.456, 0.406]
+              std: [0.229, 0.224, 0.225]
+              order: hwc
+          - ToCHWImage:
+          - GetShapeInfo:
+              order: chw
+          - ToBatch:
+      predictor:
+        inference_model_dir: ./models/ppyolov2_r50vd_dcn_mainbody_v1.0_infer/
+        input_names:
+        output_names:
+      postprocess:
+        name: DetPostProcessor
+        threshold: 0.2
+        max_det_results: 1
+        label_list:
+          - foreground
```
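For orientation, here is a minimal sketch of how a config in the new schema can be consumed. It assumes PyYAML is installed and that the YAML above is saved as `config.yaml`; the variable names are illustrative, not part of the commit.

```python
import yaml  # assumes PyYAML; config.yaml holds the YAML shown above

with open("config.yaml") as f:
    config = yaml.safe_load(f)

print(config["Global"]["Engine"])  # POPEngine
for algo_config in config["AlgoModule"]:
    module = algo_config["Module"]  # what POPEngine hands to AlgoMod
    print(module["preprocess"]["name"])                # ImageProcessor
    print(module["predictor"]["inference_model_dir"])
    print(module["postprocess"]["name"])               # DetPostProcessor
```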
```diff
@@ -1,2 +1,8 @@
+from engine.pop_engine import POPEngine
+
+# import pipe engine, etc
+
+
+# TODO(gaotingquan): build engine according to config
 def build_engine(config):
-    pass
+    return POPEngine(config)
```
```diff
@@ -1,16 +1,16 @@
-from ..processor import build_processor
+from processor.algo_mod import AlgoMod


 class POPEngine:
     def __init__(self, config):
-        self.processor_list = []
-        last_algo_type = "start"
-        for processor_config in config["Processors"]:
-            processor_config["last_algo_type"] = last_algo_type
-            self.processor_list.append(build_processor(processor_config))
-            last_algo_type = processor_config["type"]
+        self.algo_list = []
+        # last_algo_type = "start"
+        for algo_config in config["AlgoModule"]:
+            # algo_config["last_algo_type"] = last_algo_type
+            self.algo_list.append(AlgoMod(algo_config["Module"]))
+            # last_algo_type = algo_config["type"]

     def process(self, x):
-        for processor in self.processor_list:
-            x = processor.process(x)
+        for algo_module in self.algo_list:
+            x = algo_module.process(x)
         return x
```
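The engine now just chains `AlgoMod.process` calls over a shared dict. A self-contained stub illustrating that contract (`FakeStage` is hypothetical, not from the commit):

```python
class FakeStage:  # hypothetical stand-in for an AlgoMod
    def process(self, x):
        x["stages_seen"] = x.get("stages_seen", 0) + 1
        return x

data = {"input_image": None}
for stage in [FakeStage(), FakeStage()]:  # what POPEngine.process does
    data = stage.process(data)
print(data["stages_seen"])  # 2
```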
```diff
@@ -1,5 +1,13 @@
-from ..engine import build_engine
-from ..utils import config
+import os
+import sys
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+
+from engine import build_engine
+from utils import config
+from utils.get_image_list import get_image_list


 def main():
```
```diff
@@ -9,6 +17,13 @@ def main():
     config_dict.profiler_options = args.profiler_options
     engine = build_engine(config_dict)

+    image_list = get_image_list(config_dict["Global"]["infer_imgs"])
+    for idx, image_file in enumerate(image_list):
+        img = cv2.imread(image_file)[:, :, ::-1]
+        input_data = {"input_image": img}
+        output = engine.process(input_data)
+        print(output)
+

 if __name__ == '__main__':
     main()
```
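The `[:, :, ::-1]` slice in the loop converts OpenCV's BGR channel order to RGB. A tiny demonstration:

```python
import numpy as np

bgr = np.array([[[255, 0, 0]]], dtype=np.uint8)  # one pure-blue pixel, BGR
rgb = bgr[:, :, ::-1]                            # reverse the channel axis
print(rgb.tolist())                              # [[[0, 0, 255]]]: blue in RGB
```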
```diff
@@ -1,20 +1,20 @@
 from abc import ABC, abstractmethod

-from processor.algo_mod import predictors, searcher
+from processor.algo_mod import searcher
+from processor.algo_mod.predictors import build_predictor


-def build_processor(config):
-    processor_type = config.get("processor_type")
-    processor_mod = locals()[processor_type]
-    processor_name = config.get("processor_name")
-    return getattr(processor_mod, processor_name)
+# def build_processor(config):
+#     print(config)
+#     processor_type = config.get("processor_type")
+#     processor_mod = locals()[processor_type]
+#     processor_name = config.get("processor_name")
+#     return getattr(processor_mod, processor_name)


-class BaseProcessor(ABC):
-    @abstractmethod
-    def __init__(self, config):
-        pass
-
-    @abstractmethod
-    def process(self, input_data):
-        pass
+# class BaseProcessor(ABC):
+#     @abstractmethod
+#     def __init__(self, config):
+#         pass
+
+#     @abstractmethod
+#     def process(self, input_data):
+#         pass
```
```diff
@@ -1,14 +1,23 @@
-from .. import BaseProcessor, build_processor
+from processor.algo_mod.data_processor import ImageProcessor
+from processor.algo_mod.post_processor.det import DetPostProcessor
+from processor.algo_mod.predictors import build_predictor


-class AlgoMod(BaseProcessor):
+def build_processor(config):
+    # processor_type = config.get("processor_type")
+    # processor_mod = locals()[processor_type]
+    processor_name = config.get("name")
+    return eval(processor_name)(config)
+
+
+class AlgoMod(object):
     def __init__(self, config):
-        self.pre_processor = build_processor(config["pre_processor"])
-        self.predictor = build_processor(config["predictor"])
-        self.post_processor = build_processor(config["post_processor"])
+        self.pre_processor = build_processor(config["preprocess"])
+        self.predictor = build_predictor(config["predictor"])
+        self.post_processor = build_processor(config["postprocess"])

     def process(self, input_data):
-        input_data = self.pre_processor(input_data)
-        input_data = self.predictor(input_data)
-        input_data = self.post_processor(input_data)
+        input_data = self.pre_processor.process(input_data)
+        input_data = self.predictor.process(input_data)
+        input_data = self.post_processor.process(input_data)
         return input_data
```
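`build_processor` here resolves the class named in the config via `eval`, relying on the imports at the top of the file. A dict-based registry is a safer equivalent; the sketch below (with a stubbed `ImageProcessor`) shows the same lookup without `eval`:

```python
class ImageProcessor:  # stub standing in for the imported class
    def __init__(self, config):
        self.config = config

PROCESSOR_REGISTRY = {"ImageProcessor": ImageProcessor}

def build_processor(config):
    # same effect as eval(processor_name)(config), but only names that
    # were explicitly registered can ever be constructed
    return PROCESSOR_REGISTRY[config["name"]](config)

print(type(build_processor({"name": "ImageProcessor"})).__name__)
```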
```diff
@@ -1 +1,2 @@
-from image_processor import ImageProcessor
+from processor.algo_mod.data_processor.image_processor import ImageProcessor
+from processor.algo_mod.data_processor.bbox_cropper import BBoxCropper
```
```diff
@@ -1,4 +1,4 @@
-from processor import BaseProcessor
+from processor.algo_mod.data_processor.image_processor import BaseProcessor


 class BBoxCropper(BaseProcessor):
```
```diff
@@ -6,16 +6,29 @@ from PIL import Image
 import paddle

 from utils import logger
-from processor import BaseProcessor
+# from processor import BaseProcessor

+from abc import ABC, abstractmethod


-class ImageProcessor(BaseProcessor):
+class BaseProcessor(ABC):
+    @abstractmethod
+    def __init__(self, *args, **kwargs):
+        pass
+
+    @abstractmethod
+    def process(self, input_data):
+        pass
+
+
+class ImageProcessor(object):
     def __init__(self, config):
         self.processors = []
-        for processor_config in config.get("image_processors"):
+        for processor_config in config.get("processors"):
             name = list(processor_config)[0]
-            param = {} if processor_config[name] is None else processor_config[name]
-            op = locals()[name](**param)
+            param = {} if processor_config[name] is None else processor_config[
+                name]
+            op = eval(name)(**param)
             self.processors.append(op)

     def process(self, input_data):
```
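Each entry of `processors` arrives from YAML as a one-key mapping (or a bare name whose value is `None`), which is what the `list(processor_config)[0]` unpacking relies on. For example:

```python
# as produced by "- ResizeImage: {size: ..., interpolation: ...}" in YAML
processor_config = {"ResizeImage": {"size": [640, 640], "interpolation": 2}}
name = list(processor_config)[0]  # "ResizeImage"
param = {} if processor_config[name] is None else processor_config[name]
print(name, param)  # ResizeImage {'size': [640, 640], 'interpolation': 2}
```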
```diff
@@ -30,25 +43,53 @@ class ImageProcessor(BaseProcessor):


 class GetShapeInfo(BaseProcessor):
-    def __init__(self):
-        pass
+    def __init__(self, order="hwc"):
+        super().__init__()
+        self.order = order

     def process(self, input_data):
         input_image = input_data["input_image"]
         image = input_data["image"]
-        input_data['im_shape'] = np.array(input_image.shape[:2], dtype=np.float32)
-        input_data['input_shape'] = np.array(image.shape[:2], dtype=np.float32)
-        input_data['scale_factor'] = np.array([image.shape[0] / input_image.shape[0],
-                                               image.shape[1] / input_image.shape[1]], dtype=np.float32)
+        if self.order == "hwc":
+            input_data['im_shape'] = np.array(
+                (image.shape[:2], ), dtype=np.float32)
+            input_data['scale_factor'] = np.array(
+                [
+                    image.shape[0] / input_image.shape[0],
+                    image.shape[1] / input_image.shape[1]
+                ],
+                dtype=np.float32)
+        else:
+            input_data['im_shape'] = np.array(
+                (image.shape[1:], ), dtype=np.float32)
+            input_data['scale_factor'] = np.array(
+                [
+                    image.shape[2] / input_image.shape[0],
+                    image.shape[1] / input_image.shape[1]
+                ],
+                dtype=np.float32)
+        print(image.shape[0])
         return input_data


-class ToTensor(BaseProcessor):
-    def __init__(self, config):
-        pass
-
-    def process(self, input_data):
-        image = input_data["image"]
-        input_data["input_tensor"] = paddle.to_tensor(image)
-        return input_data
+# class ToTensor(BaseProcessor):
+#     def __init__(self):
+#         super().__init__()
+
+#     def process(self, input_data):
+#         image = input_data["image"]
+#         input_data["input_tensor"] = paddle.to_tensor(image)
+#         return input_data
+
+
+class ToBatch(BaseProcessor):
+    def __init__(self):
+        super().__init__()
+
+    def process(self, input_data):
+        image = input_data["image"]
+        input_data["image"] = image[np.newaxis, :, :, :]
+        return input_data
```
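`ToBatch` just prepends a batch axis, equivalent to `np.expand_dims(image, 0)`:

```python
import numpy as np

chw = np.zeros((3, 640, 640), dtype=np.float32)
batched = chw[np.newaxis, :, :, :]  # same as np.expand_dims(chw, 0)
print(batched.shape)                # (1, 3, 640, 640)
```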
```diff
@@ -123,8 +164,7 @@ class ResizeImage:
         else:
             logger.warning(
                 f"The backend of Resize only support \"cv2\" or \"PIL\". \"f{backend}\" is unavailable. "
-                f"Use \"cv2\" instead."
-            )
+                f"Use \"cv2\" instead.")
             self.resize_func = cv2.resize

     def __call__(self, img):
```
```diff
@@ -191,7 +231,8 @@ class NormalizeImage:
         self.std = np.array(std).reshape(shape).astype('float32')

     def __call__(self, img):
-        assert isinstance(img, np.ndarray), "invalid input 'img' in NormalizeImage"
+        assert isinstance(img,
+                          np.ndarray), "invalid input 'img' in NormalizeImage"

         img = (img.astype('float32') * self.scale - self.mean) / self.std
```
```diff
@@ -0,0 +1,35 @@
+from functools import reduce
+
+import numpy as np
+
+
+class DetPostProcessor(object):
+    def __init__(self, config):
+        super().__init__()
+        self.threshold = config["threshold"]
+        self.label_list = config["label_list"]
+        self.max_det_results = config["max_det_results"]
+
+    def process(self, pred):
+        np_boxes = pred["save_infer_model/scale_0.tmp_1"]
+        if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
+            print('[WARNNING] No object detected.')
+            np_boxes = np.array([])
+
+        keep_indexes = np_boxes[:, 1].argsort()[::-1][:self.max_det_results]
+        results = []
+        for idx in keep_indexes:
+            single_res = np_boxes[idx]
+            class_id = int(single_res[0])
+            score = single_res[1]
+            bbox = single_res[2:]
+            if score < self.threshold:
+                continue
+            label_name = self.label_list[class_id]
+            results.append({
+                "class_id": class_id,
+                "score": score,
+                "bbox": bbox,
+                "label_name": label_name,
+            })
+        return results
```
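A synthetic run of the selection logic above. The rows follow the PaddleDetection export convention `[class_id, score, x1, y1, x2, y2]`; the values here are made up:

```python
import numpy as np

np_boxes = np.array([
    [0., 0.9, 10., 10., 100., 100.],
    [0., 0.1, 20., 20., 50., 50.],  # below threshold 0.2, filtered later
])
# top-1 by score, as with max_det_results: 1
keep_indexes = np_boxes[:, 1].argsort()[::-1][:1]
print(np_boxes[keep_indexes])  # only the 0.9-score box survives
```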
```diff
@@ -1,7 +1,11 @@
-from .fake_cls import FakeClassifier
+from processor.algo_mod.predictors.paddle_predictor import Predictor as paddle_predictor
+from processor.algo_mod.predictors.onnx_predictor import Predictor as onnx_predictor


-def build_algo_mod(config):
-    algo_name = config.get("algo_name")
-    if algo_name == "fake_clas":
-        return FakeClassifier(config)
+def build_predictor(config):
+    # if use paddle backend
+    if True:
+        return paddle_predictor(config)
+    # if use onnx backend
+    else:
+        return onnx_predictor(config)
```
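The backend switch is currently hardcoded to Paddle via `if True:`. A config-driven variant could look like the sketch below; the `backend` key and the stub classes are hypothetical:

```python
class PaddlePredictorStub:   # stands in for paddle_predictor
    def __init__(self, config): ...

class ONNXPredictorStub:     # stands in for onnx_predictor
    def __init__(self, config): ...

def build_predictor(config):
    backend = config.get("backend", "paddle")  # hypothetical config key
    if backend == "paddle":
        return PaddlePredictorStub(config)
    return ONNXPredictorStub(config)

print(type(build_predictor({})).__name__)  # PaddlePredictorStub
```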
```diff
@@ -1,9 +0,0 @@
-from processor import BaseProcessor
-
-
-class FakeClassifier(BaseProcessor):
-    def __init__(self, config):
-        pass
-
-    def process(self, input_data):
-        pass
```
```diff
@@ -1,6 +0,0 @@
-class FakeDetector:
-    def __init__(self):
-        pass
-
-    def predict(self):
-        pass
```
```diff
@@ -0,0 +1,3 @@
+class Predictor(object):
+    def __init__(self, config):
+        super().__init__()
```
```diff
@@ -1,3 +1,65 @@
-from paddle.inference import create_predictor, Config
+import os
+import platform
+
+from paddle.inference import create_predictor
+from paddle.inference import Config as PaddleConfig
+
+
+class Predictor(object):
+    def __init__(self, config):
+        super().__init__()
+        # HALF precission predict only work when using tensorrt
+        if config.get("use_fp16", False):
+            assert config.get("use_tensorrt", False) is True
+
+        inference_model_dir = config["inference_model_dir"]
+        params_file = os.path.join(inference_model_dir, "inference.pdiparams")
+        model_file = os.path.join(inference_model_dir, "inference.pdmodel")
+        paddle_config = PaddleConfig(model_file, params_file)
+
+        if config.get("use_gpu", False):
+            paddle_config.enable_use_gpu(config.get("gpu_mem", 8000), 0)
+        else:
+            paddle_config.disable_gpu()
+            if config.get("enable_mkldnn", False):
+                # there is no set_mkldnn_cache_capatity() on macOS
+                if platform.system() != "Darwin":
+                    # cache 10 different shapes for mkldnn to avoid memory leak
+                    paddle_config.set_mkldnn_cache_capacity(10)
+                paddle_config.enable_mkldnn()
+        paddle_config.set_cpu_math_library_num_threads(
+            config.get("cpu_num_threads", 10))
+
+        if config.get("enable_profile", False):
+            paddle_config.enable_profile()
+        paddle_config.disable_glog_info()
+        paddle_config.switch_ir_optim(config.get("ir_optim",
+                                                 True))  # default true
+        if config.get("use_tensorrt", True):
+            paddle_config.enable_tensorrt_engine(
+                precision_mode=PaddleConfig.Precision.Half
+                if config.get("use_fp16", False) else
+                PaddleConfig.Precision.Float32,
+                max_batch_size=config.get("batch_size", 1),
+                workspace_size=1 << 30,
+                min_subgraph_size=30)
+
+        paddle_config.enable_memory_optim()
+        # use zero copy
+        paddle_config.switch_use_feed_fetch_ops(False)
+        self.predictor = create_predictor(paddle_config)
+
+    def process(self, input_data):
+        input_names = self.predictor.get_input_names()
+        for input_name in input_names:
+            input_tensor = self.predictor.get_input_handle(input_name)
+            input_tensor.copy_from_cpu(input_data[input_name])
+        self.predictor.run()
+
+        output_data = {}
+        output_names = self.predictor.get_output_names()
+        for output_name in output_names:
+            output = self.predictor.get_output_handle(output_name)
+            output_data[output_name] = output.copy_to_cpu()
+
+        return output_data
```
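A hedged usage sketch for this predictor, left as comments because it needs an exported model on disk; the input names (`image`, `im_shape`, `scale_factor`) are typical for PP-YOLOv2 exports but depend on the actual model:

```python
# predictor = Predictor(
#     {"inference_model_dir": "./models/ppyolov2_r50vd_dcn_mainbody_v1.0_infer/"})
# outputs = predictor.process({
#     "image": batched_image,        # float32 NCHW batch from the preprocess chain
#     "im_shape": im_shape,          # shape info produced by GetShapeInfo
#     "scale_factor": scale_factor,  # resize ratios
# })
# print(list(outputs))  # e.g. ["save_infer_model/scale_0.tmp_1"],
#                       # which DetPostProcessor consumes
```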
deploy/python/ppshitu_v2/utils/get_image_list.py (new file, +22)

```diff
@@ -0,0 +1,22 @@
+import os
+import argparse
+import base64
+import numpy as np
+
+
+def get_image_list(img_file):
+    imgs_lists = []
+    if img_file is None or not os.path.exists(img_file):
+        raise Exception("not found any img file in {}".format(img_file))
+
+    img_end = ['jpg', 'png', 'jpeg', 'JPEG', 'JPG', 'bmp']
+    if os.path.isfile(img_file) and img_file.split('.')[-1] in img_end:
+        imgs_lists.append(img_file)
+    elif os.path.isdir(img_file):
+        for single_file in os.listdir(img_file):
+            if single_file.split('.')[-1] in img_end:
+                imgs_lists.append(os.path.join(img_file, single_file))
+    if len(imgs_lists) == 0:
+        raise Exception("not found any img file in {}".format(img_file))
+    imgs_lists = sorted(imgs_lists)
+    return imgs_lists
```
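A self-contained check of `get_image_list` using a temporary directory (assumes the function above is in scope):

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, "a.jpg"), "w").close()
    open(os.path.join(d, "notes.txt"), "w").close()
    print(get_image_list(d))  # only the .jpg file is returned
```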