lzhangzz 640aa03538
Support Windows ()
* minor changes

* support windows

* fix GCC build

* fix lint

* reformat

* fix Windows build

* fix GCC build

* search backend ops for onnxruntime

* fix lint

* fix lint

* code clean-up

* code clean-up

* fix clang build

* fix trt support

* fix cmake for ncnn

* fix cmake for openvino

* fix SDK Python API

* handle ops for other backends (ncnn, trt)

* handle SDK Python API library location

* robustify linkage

* fix cuda

* minor fix for openvino & ncnn

* use CMAKE_CUDA_ARCHITECTURES if set

* fix cuda preprocessor

* fix misc

* fix pplnn & pplcv, drop support for pplcv<0.6.0

* robustify cmake

* update build.md ()

* build dynamic modules as module library & fix demo (partially)

* fix candidate path for mmdeploy_python

* move "enable CUDA" to cmake config for demo

* refine demo cmake

* add comment

* fix ubuntu build

* revert docs/en/build.md

* fix C API

* fix lint

* Windows build doc ()

* check in docs related to mmdeploy build on windows

* update build guide on windows platform

* update build guide on windows platform

* make paths of third-party libraries consistent

* make paths consistent

* correct build command for custom ops

* correct build command for sdk

* update sdk build instructions

* update doc

* correct build command

* fix lint

* correct build command and fix lint

Co-authored-by: lvhan <lvhan@pjlab.org>

* trailing whitespace ()

* minor fix

* fix sr sdk model

* fix type deduction

* fix cudaFree after driver shutdown

* update ppl.cv installation warning ()

* fix device allocator threshold & fix lint

* update doc ()

* update ppl.cv installation warning

* missing 'git clone'

Co-authored-by: chenxin <chenxin2@sensetime.com>
Co-authored-by: zhangli <zhangli@sensetime.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: lvhan <lvhan@pjlab.org>
2022-02-24 20:08:44 +08:00

53 lines · 1.5 KiB · C++

// Copyright (c) OpenMMLab. All rights reserved.

#include "graph/inference.h"

#include "archive/json_archive.h"
#include "archive/value_archive.h"
#include "core/operator.h"
#include "graph/common.h"

namespace mmdeploy::graph {

Inference::Inference(const Value& cfg) : BaseNode(cfg) {
  // the model may be given either as an already loaded Model object
  // or as a path string pointing at the model on disk
  auto& model_value = cfg["params"]["model"];
  if (model_value.is_any<Model>()) {
    model_ = model_value.get<Model>();
  } else if (model_value.is_string()) {
    auto model_path = model_value.get<std::string>();
    model_ = Model(model_path);
  } else {
    MMDEPLOY_ERROR("unsupported model specification");
    throw_exception(eInvalidArgument);
  }

  // the inference graph is described by pipeline.json bundled with the model
  auto pipeline_json = model_.ReadFile("pipeline.json").value();
  auto json = nlohmann::json::parse(pipeline_json);

  // propagate the caller's context, augmented with the loaded model
  auto context = cfg.value("context", Value(ValueType::kObject));
  context["model"] = model_;

  auto value = from_json<Value>(json);
  value["context"] = context;

  pipeline_ = std::make_unique<Pipeline>(value);
  if (!pipeline_) {
    MMDEPLOY_ERROR("failed to create pipeline");
    throw_exception(eFail);
  }
}

// building the node simply delegates to the wrapped pipeline
void Inference::Build(TaskGraph& graph) { pipeline_->Build(graph); }

// registers "Inference" so nodes of this type can be created by name
// from pipeline configs
class InferenceNodeCreator : public Creator<Node> {
 public:
  const char* GetName() const override { return "Inference"; }
  int GetVersion() const override { return 0; }
  std::unique_ptr<Node> Create(const Value& value) override {
    return std::make_unique<Inference>(value);
  }
};

REGISTER_MODULE(Node, InferenceNodeCreator);

}  // namespace mmdeploy::graph
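
For orientation, the sketch below shows the shape of a config this node accepts and how it could be constructed directly. Only the `params`/`model` and optional `context` keys are taken from the constructor above; the brace-initialization syntax and the model path are illustrative assumptions (mmdeploy's `Value` is nlohmann-json-like, but the exact syntax may differ), and in practice nodes are created by name through the registry that `REGISTER_MODULE` populates rather than constructed by hand.

// Hypothetical usage sketch, not part of the repository. Assumes that
// Value supports nlohmann-style brace initialization and that
// "/path/to/sdk/model" points at a real SDK model containing pipeline.json.
using namespace mmdeploy;

Value cfg{
    {"params",
     {
         // a path string; a pre-loaded Model object is also accepted
         {"model", "/path/to/sdk/model"},
     }},
    // {"context", ...}  // optional; the model is merged into it and both
    //                   // are forwarded to the pipeline parsed from
    //                   // pipeline.json
};

graph::Inference node(cfg);  // loads pipeline.json, constructs the Pipeline
// A subsequent node.Build(graph) appends the pipeline's tasks to a TaskGraph.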