Mirror of https://github.com/PaddlePaddle/PaddleOCR.git (synced 2025-06-03 21:53:39 +08:00)
add ocr cpp prefix to LOGI
commit dc4f1c2174
parent aedddd8107
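The change below tags every LOGI call in the native PPredictor code with an "ocr cpp" prefix, so the OCR demo's output is easy to filter in logcat. For context, LOGI in Android NDK code is conventionally a thin macro over __android_log_print; a minimal sketch of such a macro (the tag name here is an assumption, the real project defines its own in a shared header):

#include <android/log.h>

// Assumed tag; the project's common header defines the real one.
#define LOG_TAG "OCR_NDK"
#define LOGI(...) \
  __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)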
@@ -42,17 +42,17 @@ template <typename ConfigT> int PPredictor::_init(ConfigT &config) {
       // CL_PRECISION_FP32: 1, force fp32
       // CL_PRECISION_FP16: 2, force fp16
       config.set_opencl_precision(paddle::lite_api::CL_PRECISION_FP32);
-      LOGI("device: running on gpu.");
+      LOGI("ocr cpp device: running on gpu.");
     }
   } else {
-    LOGI("device: running on cpu.");
+    LOGI("ocr cpp device: running on cpu.");
     // you can give backup cpu nb model instead
     // config.set_model_from_file(cpu_nb_model_dir);
   }
   config.set_threads(_thread_num);
   config.set_power_mode(_mode);
   _predictor = paddle::lite_api::CreatePaddlePredictor(config);
-  LOGI("paddle instance created");
+  LOGI("ocr cpp paddle instance created");
   return RETURN_OK;
 }
 
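The _init hunk above follows the usual Paddle Lite mobile flow: optionally configure the OpenCL (GPU) backend, then set CPU threads and power mode and create the predictor. A minimal standalone sketch using the public MobileConfig API (the model path, thread count, and power mode here are assumptions, not taken from the repo):

#include <memory>

#include "paddle_api.h"  // Paddle Lite public C++ API

std::shared_ptr<paddle::lite_api::PaddlePredictor> make_predictor() {
  paddle::lite_api::MobileConfig config;
  // Assumed path to an optimized .nb model produced by the opt tool.
  config.set_model_from_file("/data/local/tmp/ocr.nb");
  if (paddle::lite_api::IsOpenCLBackendValid()) {
    // Same option the hunk above sets: force fp32 OpenCL kernels.
    config.set_opencl_precision(paddle::lite_api::CL_PRECISION_FP32);
  }
  config.set_threads(4);  // mirrors config.set_threads(_thread_num)
  config.set_power_mode(paddle::lite_api::LITE_POWER_HIGH);
  return paddle::lite_api::CreatePaddlePredictor(config);
}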
@@ -73,18 +73,18 @@ std::vector<PredictorInput> PPredictor::get_inputs(int num) {
 PredictorInput PPredictor::get_first_input() { return get_input(0); }
 
 std::vector<PredictorOutput> PPredictor::infer() {
-  LOGI("infer Run start %d", _net_flag);
+  LOGI("ocr cpp infer Run start %d", _net_flag);
   std::vector<PredictorOutput> results;
   if (!_is_input_get) {
     return results;
   }
   _predictor->Run();
-  LOGI("infer Run end");
+  LOGI("ocr cpp infer Run end");
 
   for (int i = 0; i < _predictor->GetOutputNames().size(); i++) {
     std::unique_ptr<const paddle::lite_api::Tensor> output_tensor =
         _predictor->GetOutput(i);
-    LOGI("output tensor[%d] size %ld", i, product(output_tensor->shape()));
+    LOGI("ocr cpp output tensor[%d] size %ld", i, product(output_tensor->shape()));
     PredictorOutput result{std::move(output_tensor), i, _net_flag};
     results.emplace_back(std::move(result));
   }
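The size logged for each output tensor comes from product(output_tensor->shape()): the element count of a tensor is the product of its dimensions. A sketch of such a helper, assuming the shape type is std::vector<int64_t> as in the Paddle Lite Tensor API (the project defines its own version of product):

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Element count of a tensor: the product of its shape dims,
// matching the product(output_tensor->shape()) call logged above.
int64_t product(const std::vector<int64_t> &shape) {
  return std::accumulate(shape.begin(), shape.end(), int64_t{1},
                         std::multiplies<int64_t>());
}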