Add preprocess in HubServing
parent a378af3886 · commit a2376ee19e
@@ -15,7 +15,7 @@ hubserving/clas/
 ### 1. Prepare the environment
 ```shell
 # Install paddlehub; please install version 2.0
-pip3 install paddlehub==2.0.0b1 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```

 ### 2. Download the inference model
|
@@ -128,8 +128,12 @@ python hubserving/test_hubserving.py server_url image_path
 `http://[ip_address]:[port]/predict/[module_name]`
 - **image_path**: path to the test image(s); either a single image file or a directory of images.
 - **batch_size**: [**Optional**] predict in batches of `batch_size`; defaults to `1`.
+- **resize_short**: [**Optional**] during preprocessing, resize so that the short side equals this value; defaults to `256`.
+- **crop_size**: [**Optional**] during preprocessing, the size of the center crop; defaults to `224`.
+- **normalize**: [**Optional**] during preprocessing, whether to apply `normalize`; defaults to `True`.
+- **to_chw**: [**Optional**] during preprocessing, whether to transpose the image to `CHW` order; defaults to `True`.

-**Note**: If you use a `Transformer`-series model, such as `DeiT_***_384` or `ViT_***_384`, pay attention to the model's input size; you need to specify `--resize_short=384 --resize=384`.
+**Note**: If you use a `Transformer`-series model, such as `DeiT_***_384` or `ViT_***_384`, pay attention to the model's input size; you need to specify `--resize_short=384 --crop_size=384`.


 Example request:
|
|
|
@@ -15,7 +15,7 @@ hubserving/clas/
 ### 1. Prepare the environment
 ```shell
 # Install version 2.0 of PaddleHub
-pip3 install paddlehub==2.0.0b1 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip3 install paddlehub==2.1.0 --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple
 ```

 ### 2. Download inference model
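As a quick check after the pinned install above (a sketch, not part of the committed change), the installed PaddleHub version can be confirmed from Python; this assumes only that `paddlehub` exposes a `__version__` attribute, which 2.x releases do:

```python
# Sanity check after `pip3 install paddlehub==2.1.0` (assumes paddlehub
# exposes __version__, as 2.x releases do).
import paddlehub

print(paddlehub.__version__)  # expected: 2.1.0
assert paddlehub.__version__.startswith("2."), "PaddleHub 2.x is required"
```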
|
@@ -126,9 +126,13 @@ Two required parameters need to be passed to the script:
 `http://[ip_address]:[port]/predict/[module_name]`
 - **image_path**: Test image path; can be a single image path or an image directory path.
 - **batch_size**: [**Optional**] Batch size for prediction. Default: `1`.
+- **resize_short**: [**Optional**] In preprocessing, resize the image so that its short side equals this value. Default: `256`.
+- **crop_size**: [**Optional**] In preprocessing, the center crop size. Default: `224`.
+- **normalize**: [**Optional**] In preprocessing, whether to apply `normalize`. Default: `True`.
+- **to_chw**: [**Optional**] In preprocessing, whether to transpose the image to `CHW` order. Default: `True`.

 **Notice**:
-If you want to use `Transformer` series models, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input size of the model; you need to set `--resize_short=384`, `--resize=384`.
+If you want to use `Transformer` series models, such as `DeiT_***_384`, `ViT_***_384`, etc., please pay attention to the input size of the model; you need to set `--resize_short=384`, `--crop_size=384`.

 **Eg.**
 ```shell
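To make the Transformer notice above concrete (an illustration, not part of the diff), this is the operator list that `--resize_short=384 --crop_size=384` with the default `--normalize=True --to_chw=True` corresponds to, following the structure of the `PreprocessConfig` helper added in the code change below; the variable name is only illustrative:

```python
# Operator config implied by --resize_short=384 --crop_size=384 (defaults for
# the remaining flags), mirroring PreprocessConfig in the diff below.
transformer_384_config = [{
    'ResizeImage': {
        'resize_short': 384
    }
}, {
    'CropImage': {
        'size': 384
    }
}, {
    'NormalizeImage': {
        'scale': 0.00392157,
        'mean': [0.485, 0.456, 0.406],
        'std': [0.229, 0.224, 0.225],
        'order': ''
    }
}, {
    'ToCHWImage': None
}]
```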
|
|
|
@@ -32,30 +32,59 @@ from utils import config
 from utils.encode_decode import np_to_b64
 from python.preprocess import create_operators

-preprocess_config = [{
-    'ResizeImage': {
-        'resize_short': 256
-    }
-}, {
-    'CropImage': {
-        'size': 224
-    }
-}, {
-    'NormalizeImage': {
-        'scale': 0.00392157,
-        'mean': [0.485, 0.456, 0.406],
-        'std': [0.229, 0.224, 0.225],
-        'order': ''
-    }
-}, {
-    'ToCHWImage': None
-}]
+
+def get_args():
+    def str2bool(v):
+        return v.lower() in ("true", "t", "1")
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--server_url", type=str)
+    parser.add_argument("--image_file", type=str)
+    parser.add_argument("--batch_size", type=int, default=1)
+    parser.add_argument("--resize_short", type=int, default=256)
+    parser.add_argument("--crop_size", type=int, default=224)
+    parser.add_argument("--normalize", type=str2bool, default=True)
+    parser.add_argument("--to_chw", type=str2bool, default=True)
+    return parser.parse_args()
+
+
+class PreprocessConfig(object):
+    def __init__(self,
+                 resize_short=256,
+                 crop_size=224,
+                 normalize=True,
+                 to_chw=True):
+        self.config = [{
+            'ResizeImage': {
+                'resize_short': resize_short
+            }
+        }, {
+            'CropImage': {
+                'size': crop_size
+            }
+        }]
+        if normalize:
+            self.config.append({
+                'NormalizeImage': {
+                    'scale': 0.00392157,
+                    'mean': [0.485, 0.456, 0.406],
+                    'std': [0.229, 0.224, 0.225],
+                    'order': ''
+                }
+            })
+        if to_chw:
+            self.config.append({'ToCHWImage': None})
+
+    def __call__(self):
+        return self.config
+

 def main(args):
     image_path_list = get_image_list(args.image_file)
     headers = {"Content-type": "application/json"}
-    preprocess_ops = create_operators(preprocess_config)
+    preprocess_ops = create_operators(
+        PreprocessConfig(args.resize_short, args.crop_size, args.normalize,
+                         args.to_chw)())

     cnt = 0
     predict_time = 0
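For readers unfamiliar with PaddleClas' preprocessing utilities, here is a hedged sketch of exercising the new pipeline end to end. It assumes the code runs from PaddleClas' `deploy/` directory (so `python.preprocess` is importable), that `PreprocessConfig` can be imported from the script above (the import path is an assumption; copy the class locally otherwise), that each operator returned by `create_operators` is a callable applied to the image in sequence, and that `demo.jpg` is a placeholder image path:

```python
# Sketch: turning the new CLI flags into a runnable preprocessing pipeline.
import cv2
from python.preprocess import create_operators
# Import path assumed; copy the PreprocessConfig class from the diff above
# if hubserving is not importable as a package.
from hubserving.test_hubserving import PreprocessConfig

ops = create_operators(
    PreprocessConfig(resize_short=256, crop_size=224,
                     normalize=True, to_chw=True)())

img = cv2.imread("demo.jpg")[:, :, ::-1]  # placeholder image path, BGR -> RGB
for op in ops:  # assumed call convention: each op transforms the ndarray
    img = op(img)
print(img.shape)  # expected: (3, 224, 224) with to_chw=True
```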
|
@@ -113,14 +142,10 @@ def main(args):

             for number, result_list in enumerate(preds):
                 all_score += result_list["scores"][0]
-                result_str = ""
-                for i in range(len(result_list["class_ids"])):
-                    result_str += "{}: {:.2f}\t".format(
-                        result_list["class_ids"][i],
-                        result_list["scores"][i])
-
+                pred_str = ", ".join(
+                    [f"{k}: {result_list[k]}" for k in result_list])
                 logger.info(
-                    f"File:{img_name_list[number]}, The result(s): {result_str}"
+                    f"File:{img_name_list[number]}, The result(s): {pred_str}"
                 )

         finally:
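The refactor above replaces the manual string building with a single join over the result dictionary, so every key the server returns is logged rather than only `class_ids` and `scores`. As an illustration, the dictionary below is made up, shaped like one element of `preds` with the keys that appear in the diff plus a typical `label_names` entry:

```python
# Hypothetical single classification result, mimicking one element of `preds`.
result_list = {
    "class_ids": [8, 7],
    "scores": [0.9791, 0.0098],
    "label_names": ["hen", "cock"],  # illustrative key, not shown in the diff
}

pred_str = ", ".join([f"{k}: {result_list[k]}" for k in result_list])
print(pred_str)
# class_ids: [8, 7], scores: [0.9791, 0.0098], label_names: ['hen', 'cock']
```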
|
@@ -136,10 +161,5 @@ def main(args):

 if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--server_url", type=str)
-    parser.add_argument("--image_file", type=str)
-    parser.add_argument("--batch_size", type=int, default=1)
-    args = parser.parse_args()
-
+    args = get_args()

     main(args)
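One detail worth calling out: the new `--normalize` and `--to_chw` flags go through the `str2bool` helper in `get_args()` rather than argparse's `type=bool`, which would treat any non-empty string (including "False") as truthy. A small sketch of how values are interpreted:

```python
# Behavior of the str2bool helper used for --normalize and --to_chw:
# only "true", "t" and "1" (case-insensitive) parse as True.
def str2bool(v):
    return v.lower() in ("true", "t", "1")

print(str2bool("True"))   # True
print(str2bool("1"))      # True
print(str2bool("False"))  # False -> e.g. `--to_chw False` disables the transpose
print(str2bool("yes"))    # False -- "yes" is not accepted as True
```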
|
|