Add preprocess in HubServing
parent a378af3886
commit a2376ee19e

@@ -15,7 +15,7 @@ hubserving/clas/
 ### 1. Prepare the environment
 ```shell
 # Install paddlehub, version 2.0 is required
-pip3 install paddlehub==2.0.0b1 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```

 ### 2. Download the inference model

@@ -128,8 +128,12 @@ python hubserving/test_hubserving.py server_url image_path
 `http://[ip_address]:[port]/predict/[module_name]`
 - **image_path**: path of the test image(s); either a single image or a directory of images.
 - **batch_size**: [**Optional**] prediction batch size; defaults to `1`.
+- **resize_short**: [**Optional**] in preprocessing, resize the image by its shorter side; defaults to `256`.
+- **crop_size**: [**Optional**] in preprocessing, size of the center crop; defaults to `224`.
+- **normalize**: [**Optional**] in preprocessing, whether to apply `normalize`; defaults to `True`.
+- **to_chw**: [**Optional**] in preprocessing, whether to transpose to `CHW` order; defaults to `True`.

-**Note**: If you use Transformer-series models such as `DeiT_***_384` or `ViT_***_384`, pay attention to the model's input size; you need to specify `--resize_short=384 --resize=384`.
+**Note**: If you use Transformer-series models such as `DeiT_***_384` or `ViT_***_384`, pay attention to the model's input size; you need to specify `--resize_short=384 --crop_size=384`.


 Example request:
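
For reference, the four new preprocessing flags correspond to an operator configuration like the sketch below. The values are the documented defaults; the operator names and fields are taken from the `test_hubserving.py` change later in this commit, and how the list is consumed by `create_operators` is assumed here rather than shown.

```python
# Sketch: the operator config implied by the default flag values
# (--resize_short=256 --crop_size=224 --normalize=True --to_chw=True).
# Op names and fields follow the config built in test_hubserving.py below.
default_preprocess_config = [
    {'ResizeImage': {'resize_short': 256}},  # resize by the shorter side
    {'CropImage': {'size': 224}},            # center crop
    {'NormalizeImage': {                     # omitted when --normalize=False
        'scale': 0.00392157,                 # i.e. 1/255
        'mean': [0.485, 0.456, 0.406],
        'std': [0.229, 0.224, 0.225],
        'order': ''
    }},
    {'ToCHWImage': None},                    # omitted when --to_chw=False
]
```
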
@@ -15,7 +15,7 @@ hubserving/clas/
 ### 1. Prepare the environment
 ```shell
 # Install version 2.0 of PaddleHub
-pip3 install paddlehub==2.0.0b1 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```

 ### 2. Download inference model

@@ -126,9 +126,13 @@ Two required parameters need to be passed to the script:
 `http://[ip_address]:[port]/predict/[module_name]`
 - **image_path**: Test image path; can be a single image path or an image directory path.
 - **batch_size**: [**Optional**] Prediction batch size. Defaults to `1`.
+- **resize_short**: [**Optional**] In preprocessing, resize by the short side. Defaults to `256`.
+- **crop_size**: [**Optional**] In preprocessing, center crop size. Defaults to `224`.
+- **normalize**: [**Optional**] In preprocessing, whether to apply `normalize`. Defaults to `True`.
+- **to_chw**: [**Optional**] In preprocessing, whether to transpose to `CHW` order. Defaults to `True`.

 **Notice**:
-If you want to use `Transformer series models`, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input size of the model, and you need to set `--resize_short=384`, `--resize=384`.
+If you want to use `Transformer series models`, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input size of the model, and you need to set `--resize_short=384`, `--crop_size=384`.

 **Eg.**
 ```shell
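
Since `normalize` and `to_chw` are boolean options, it is worth noting how the script parses them. The sketch below reuses the `str2bool` helper added to `test_hubserving.py` in this commit: any value other than `true`, `t`, or `1` (case-insensitive) is read as `False`.

```python
import argparse


def str2bool(v):
    # Same rule as the helper added in test_hubserving.py:
    # only "true", "t" and "1" (any case) count as True.
    return v.lower() in ("true", "t", "1")


parser = argparse.ArgumentParser()
parser.add_argument("--normalize", type=str2bool, default=True)
parser.add_argument("--to_chw", type=str2bool, default=True)

print(parser.parse_args([]).normalize)                        # True (default)
print(parser.parse_args(["--normalize", "False"]).normalize)  # False
print(parser.parse_args(["--to_chw", "0"]).to_chw)            # False
```
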
@@ -32,30 +32,59 @@ from utils import config
 from utils.encode_decode import np_to_b64
 from python.preprocess import create_operators

-preprocess_config = [{
-    'ResizeImage': {
-        'resize_short': 256
-    }
-}, {
-    'CropImage': {
-        'size': 224
-    }
-}, {
-    'NormalizeImage': {
-        'scale': 0.00392157,
-        'mean': [0.485, 0.456, 0.406],
-        'std': [0.229, 0.224, 0.225],
-        'order': ''
-    }
-}, {
-    'ToCHWImage': None
-}]
+
+def get_args():
+    def str2bool(v):
+        return v.lower() in ("true", "t", "1")
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--server_url", type=str)
+    parser.add_argument("--image_file", type=str)
+    parser.add_argument("--batch_size", type=int, default=1)
+    parser.add_argument("--resize_short", type=int, default=256)
+    parser.add_argument("--crop_size", type=int, default=224)
+    parser.add_argument("--normalize", type=str2bool, default=True)
+    parser.add_argument("--to_chw", type=str2bool, default=True)
+    return parser.parse_args()
+
+
+class PreprocessConfig(object):
+    def __init__(self,
+                 resize_short=256,
+                 crop_size=224,
+                 normalize=True,
+                 to_chw=True):
+        self.config = [{
+            'ResizeImage': {
+                'resize_short': resize_short
+            }
+        }, {
+            'CropImage': {
+                'size': crop_size
+            }
+        }]
+        if normalize:
+            self.config.append({
+                'NormalizeImage': {
+                    'scale': 0.00392157,
+                    'mean': [0.485, 0.456, 0.406],
+                    'std': [0.229, 0.224, 0.225],
+                    'order': ''
+                }
+            })
+        if to_chw:
+            self.config.append({'ToCHWImage': None})
+
+    def __call__(self):
+        return self.config


 def main(args):
     image_path_list = get_image_list(args.image_file)
     headers = {"Content-type": "application/json"}
-    preprocess_ops = create_operators(preprocess_config)
+    preprocess_ops = create_operators(
+        PreprocessConfig(args.resize_short, args.crop_size, args.normalize,
+                         args.to_chw)())

     cnt = 0
     predict_time = 0
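
A quick usage sketch of the new `PreprocessConfig` class: calling the instance returns the list of operator dicts that `main()` hands to `create_operators`. The 384 values below match the Transformer note in the README hunks above; what `create_operators` (imported from `python.preprocess`) does with the list is assumed here and not shown.

```python
# Sketch: building the preprocessing config for a 384-input Transformer model,
# mirroring how main() wires the CLI flags into PreprocessConfig.
cfg_384 = PreprocessConfig(resize_short=384, crop_size=384,
                           normalize=True, to_chw=True)()
# cfg_384 is a list of op dicts:
#   ResizeImage(resize_short=384) -> CropImage(size=384)
#   -> NormalizeImage(...) -> ToCHWImage

# Disabling a step simply leaves its entry out of the list.
cfg_no_norm = PreprocessConfig(normalize=False)()
assert not any('NormalizeImage' in op for op in cfg_no_norm)

# main() then turns the list into callable operators:
# preprocess_ops = create_operators(cfg_384)
```
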
@@ -113,14 +142,10 @@ def main(args):

             for number, result_list in enumerate(preds):
                 all_score += result_list["scores"][0]
-                result_str = ""
-                for i in range(len(result_list["class_ids"])):
-                    result_str += "{}: {:.2f}\t".format(
-                        result_list["class_ids"][i],
-                        result_list["scores"][i])
-
+                pred_str = ", ".join(
+                    [f"{k}: {result_list[k]}" for k in result_list])
                 logger.info(
-                    f"File:{img_name_list[number]}, The result(s): {result_str}"
+                    f"File:{img_name_list[number]}, The result(s): {pred_str}"
                 )

         finally:
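
The reworked logging no longer formats only `class_ids` and `scores`; it joins every key/value pair in `result_list`. A small sketch of the resulting string, using a hypothetical prediction dict (only `class_ids` and `scores` are known from the old code; real responses may carry more keys):

```python
# Hypothetical prediction entry; extra keys, if any, would be printed as-is.
result_list = {"class_ids": [283, 282], "scores": [0.57, 0.12]}

pred_str = ", ".join([f"{k}: {result_list[k]}" for k in result_list])
print(pred_str)
# class_ids: [283, 282], scores: [0.57, 0.12]
```
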
@@ -136,10 +161,5 @@ def main(args):


 if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--server_url", type=str)
-    parser.add_argument("--image_file", type=str)
-    parser.add_argument("--batch_size", type=int, default=1)
-    args = parser.parse_args()
-
+    args = get_args()
     main(args)