[Enhancement] Load openvino model from memory (#2027)
* fix cpu docker (different openvino version for python & sdk)
* load openvino model from memory
* fix lint
commit bc79c0dd2b
parent ca773a78f0
SDK OpenVINO net (OpenVINONet::Init):

@@ -77,30 +77,18 @@ Result<void> OpenVINONet::Init(const Value& args) {
   auto model = context["model"].get<Model>();
   OUTCOME_TRY(auto config, model.GetModelConfig(name));
 
-  // TODO: read network with stream
-  // save xml and bin to temp file
-  auto tmp_dir = fs::temp_directory_path();
-  std::string tmp_xml = (tmp_dir / fs::path("tmp.xml")).string();
-  std::string tmp_bin = (tmp_dir / fs::path("tmp.bin")).string();
   OUTCOME_TRY(auto raw_xml, model.ReadFile(config.net));
   OUTCOME_TRY(auto raw_bin, model.ReadFile(config.weights));
-
-  try {
-    std::ofstream xml_out(tmp_xml, std::ios::binary);
-    xml_out << raw_xml;
-    xml_out.close();
-    std::ofstream bin_out(tmp_bin, std::ios::binary);
-    bin_out << raw_bin;
-    bin_out.close();
-  } catch (const std::exception& e) {
-    MMDEPLOY_ERROR("unhandled exception when creating tmp xml/bin: {}", e.what());
-    return Status(eFail);
-  }
+  auto ov_tensor = InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {raw_bin.size()},
+                                               InferenceEngine::Layout::C);
+  auto ov_blob = InferenceEngine::make_shared_blob<uint8_t>(ov_tensor);
+  ov_blob->allocate();
+  memcpy(ov_blob->buffer(), raw_bin.data(), ov_blob->byteSize());
 
   try {
     // create cnnnetwork
     core_ = InferenceEngine::Core();
-    network_ = core_.ReadNetwork(tmp_xml, tmp_bin);
+    network_ = core_.ReadNetwork(raw_xml, std::move(ov_blob));
 
     // set input tensor
     InferenceEngine::InputsDataMap input_info = network_.getInputsInfo();
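For reference, the new code path boils down to the following standalone sketch,
written against the same classic InferenceEngine API the patch uses. The
model.xml/model.bin paths and the main() harness are illustrative stand-ins: in
mmdeploy the two buffers come from Model::ReadFile(), which can also pull them
out of a packed SDK model archive.

    // Minimal sketch: load an OpenVINO IR from memory instead of temp files.
    // "model.xml"/"model.bin" and this harness are illustrative only.
    #include <cstdint>
    #include <cstring>
    #include <fstream>
    #include <iterator>
    #include <string>
    #include <vector>

    #include <inference_engine.hpp>

    int main() {
      // Stand-in for mmdeploy's Model::ReadFile(): slurp both IR files as bytes.
      std::ifstream xml_ifs("model.xml", std::ios::binary);
      std::string raw_xml((std::istreambuf_iterator<char>(xml_ifs)),
                          std::istreambuf_iterator<char>());
      std::ifstream bin_ifs("model.bin", std::ios::binary);
      std::vector<uint8_t> raw_bin((std::istreambuf_iterator<char>(bin_ifs)),
                                   std::istreambuf_iterator<char>());

      // Wrap the weights in a 1-D U8 blob so ReadNetwork can consume them
      // directly, with no tmp.xml/tmp.bin ever written to disk.
      InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8,
                                       {raw_bin.size()}, InferenceEngine::Layout::C);
      auto blob = InferenceEngine::make_shared_blob<uint8_t>(desc);
      blob->allocate();
      std::memcpy(blob->buffer(), raw_bin.data(), blob->byteSize());

      InferenceEngine::Core core;
      // The overload taking the xml text plus a weights blob -- the same call
      // the patch switches to.
      auto network = core.ReadNetwork(raw_xml, blob);
      auto exec = core.LoadNetwork(network, "CPU");
      (void)exec;
      return 0;
    }

Besides skipping a disk round trip, this removes the need for a writable temp
directory and the risk that the fixed tmp.xml/tmp.bin names collide when
several processes initialize at once.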
CPU Dockerfile:

@@ -1,4 +1,4 @@
-FROM openvino/ubuntu18_dev:2021.4.2
+FROM openvino/ubuntu20_dev:2022.3.0
 ARG PYTHON_VERSION=3.8
 ARG TORCH_VERSION=1.10.0
 ARG TORCHVISION_VERSION=0.11.0
@@ -57,7 +57,7 @@ RUN if [ ${USE_SRC_INSIDE} == true ] ; \
 RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHVISION_VERSION}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html \
     && /opt/conda/bin/pip install --no-cache-dir openmim
 
-RUN /opt/conda/bin/mim install --no-cache-dir "mmcv"${MMCV_VERSION} onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev mmengine${MMENGINE_VERSION}
+RUN /opt/conda/bin/mim install --no-cache-dir "mmcv"${MMCV_VERSION} onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev==2022.3.0 mmengine${MMENGINE_VERSION}
 
 ENV PATH /opt/conda/bin:$PATH
 WORKDIR /root/workspace
@@ -100,14 +100,14 @@ RUN git clone -b main https://github.com/open-mmlab/mmdeploy.git &&\
     /opt/conda/bin/mim install -e .
 
 ### build SDK
-ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:/opt/intel/openvino/deployment_tools/ngraph/lib:/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64:${LD_LIBRARY_PATH}"
+ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
 RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \
     -DMMDEPLOY_BUILD_SDK=ON \
     -DMMDEPLOY_BUILD_EXAMPLES=ON \
-    -DCMAKE_CXX_COMPILER=g++-7 \
+    -DCMAKE_CXX_COMPILER=g++-9 \
     -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \
     -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn \
-    -DInferenceEngine_DIR=/opt/intel/openvino/deployment_tools/inference_engine/share \
+    -DInferenceEngine_DIR=/opt/intel/openvino/runtime/cmake \
     -DMMDEPLOY_TARGET_DEVICES=cpu \
     -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
     -DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \
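With these changes the Python tooling (openvino-dev==2022.3.0) and the C++ SDK
runtime in the base image come from the same release, which is the "different
openvino ver for python & sdk" mismatch the commit message fixes. One way to
double-check which runtime a built SDK actually links, sketched here with the
classic API's version query (not mmdeploy code):

    // Print the OpenVINO runtime version this binary is linked against, to
    // confirm it matches the pinned Python package (openvino-dev==2022.3.0).
    #include <iostream>

    #include <inference_engine.hpp>

    int main() {
      const InferenceEngine::Version* v =
          InferenceEngine::GetInferenceEngineVersion();
      std::cout << v->description << " build " << v->buildNumber << std::endl;
      return 0;
    }

On the Python side, openvino.runtime.get_version() should report a matching
2022.3 build string.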