Mirror of https://github.com/open-mmlab/mmdeploy.git
* executor prototype
* add split/when_all
* fix GCC build
* WIP let_value
* fix let_value
* WIP ensure_started
* ensure_started & start_detached
* fix let_value + when_all combo on MSVC 142
* fix static thread pool
* generic just, then, let_value, sync_wait
* minor
* generic split and when_all
* fully generic sender adapters
* when_all: workaround for GCC7
* support legacy spdlog
* fix memleak
* bulk
* static detector
* fix bulk & first pipeline
* bulk for static thread pools
* fix on MSVC
* WIP async batch submission
* WIP collation
* async batch
* fix detector
* fix async detector
* fix
* fix
* debug
* fix cuda allocator
* WIP type erased executor
* better type erasure
* simplify C API impl
* Expand & type erase TC
* deduction guide for type erased senders
* fix GCC build
* when_all for arrays of Value senders
* WIP pipeline v2
* WIP pipeline parser
* WIP timed batch operation
* add registry
* experiment
* fix pipeline
* naming
* fix mem-leak
* fix deferred batch operation
* WIP
* WIP configurable scheduler
* WIP configurable scheduler
* add comment
* parse scheduler config
* force link schedulers
* WIP pipeable sender
* WIP CPO
* ADL isolation and dismantle headers
* type erase single thread context
* fix MSVC build
* CPO
* replace decay_t with remove_cvref_t
* structure adjustment
* structure adjustment
* apply CPOs & C API rework
* refine C API
* detector async C API
* adjust detector async C API
* # Conflicts: # csrc/apis/c/detector.cpp
* fix when_all for type erased senders
* support void return for Then
* async detector
* fix some CPOs
* minor
* WIP rework capture mechanism for type erased types
* minor fix
* fix MSVC build
* move expand.h to execution
* make `Expand` pipeable
* fix type erased
* un-templatize `_TypeErasedOperation`
* re-work C API
* remove async_detector C API
* fix pipeline
* add flatten & unflatten
* fix flatten & unflatten
* add async OCR demo
* config executor for nodes & better executor API
* working async OCR example
* minor
* dynamic batch via scheduler
* dynamic batch on `Value`
* fix MSVC build
* type erase dynamic batch scheduler
* sender as Python Awaitable
* naming
* naming
* add docs
* minor
* merge tmp branch
* unify C APIs
* fix ocr
* unify APIs
* fix typo
* update async OCR demo
* add v3 API text recognizer
* fix v3 API
* fix lint
* add license info & reformat
* add demo async_ocr_v2
* revert files
* revert files
* resolve link issues
* fix scheduler linkage for shared libs
* fix license header
* add docs for `mmdeploy_executor_split`
* add missing `mmdeploy_executor_transfer_just` and `mmdeploy_executor_execute`
* make `TimedSingleThreadContext` header only
* fix lint
* simplify type-erased sender
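The log above charts the build-out of a sender/receiver-style executor: value senders (just), adapters (then, let_value, split, when_all, bulk), and consumers (sync_wait, start_detached, ensure_started), later type-erased and exposed through a C API. As orientation only, here is a self-contained toy of the just/then/sync_wait chain; the shape is a simplified assumption for illustration, not mmdeploy's actual implementation.

// Toy model of a sender chain: Just(v) produces a value, Then(s, f) maps it,
// and SyncWait(s) drives the chain to completion. Illustrative only; real
// sender implementations connect receivers to operation states.
#include <iostream>
#include <utility>

template <typename T>
struct JustSender { T value; };

template <typename T>
JustSender<T> Just(T v) { return {std::move(v)}; }

template <typename S, typename F>
struct ThenSender { S prev; F func; };

template <typename S, typename F>
ThenSender<S, F> Then(S s, F f) { return {std::move(s), std::move(f)}; }

template <typename T>
T SyncWait(JustSender<T> s) { return std::move(s.value); }

template <typename S, typename F>
auto SyncWait(ThenSender<S, F> s) { return s.func(SyncWait(std::move(s.prev))); }

int main() {
  // Compose lazily, then run synchronously: (20 * 2) + 2 = 42.
  auto sender = Then(Then(Just(20), [](int x) { return x * 2; }),
                     [](int x) { return x + 2; });
  std::cout << SyncWait(std::move(sender)) << "\n";  // prints 42
}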
51 lines · 1.5 KiB · C++
// Copyright (c) OpenMMLab. All rights reserved.

#include "inference.h"

#include "archive/json_archive.h"
#include "core/model.h"

namespace mmdeploy::graph {

Result<unique_ptr<Inference>> InferenceParser::Parse(const Value& config) {
  try {
    // The model may be supplied pre-loaded as a `Model`, or as a path to
    // load it from.
    auto& model_config = config["params"]["model"];
    Model model;
    if (model_config.is_any<Model>()) {
      model = model_config.get<Model>();
    } else {
      model = Model(model_config.get<string>());
    }
    // Read the graph description shipped inside the model package.
    OUTCOME_TRY(auto pipeline_json, model.ReadFile("pipeline.json"));
    auto json = nlohmann::json::parse(pipeline_json);

    // Propagate the caller's context (if any) and attach the loaded model
    // so the nodes of the inner pipeline can reach it.
    auto context = config.value("context", Value(ValueType::kObject));
    context["model"] = std::move(model);

    auto pipeline_config = from_json<Value>(json);
    pipeline_config["context"] = context;

    // Parse common node attributes, then build the inner pipeline.
    auto inference = std::make_unique<Inference>();
    OUTCOME_TRY(NodeParser::Parse(config, *inference));
    OUTCOME_TRY(inference->pipeline_, PipelineParser{}.Parse(pipeline_config));

    return std::move(inference);
  } catch (const Exception& e) {
    MMDEPLOY_ERROR("exception: {}", e.what());
    return failure(e.code());
  }
}
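
// For orientation: a config accepted by this parser has roughly the shape
// sketched below. The "params"/"model" and "context" keys come from the
// code above; the path is a made-up example ("model" may also hold an
// already-loaded Model object), and the node attributes consumed by
// NodeParser are omitted.
//
//   {
//     "params": { "model": "/path/to/sdk_model_dir" },
//     "context": { ... }
//   }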

// Exposes the parser through the registry so that pipeline configs can
// instantiate this node by the name "Inference".
class InferenceCreator : public Creator<Node> {
 public:
  const char* GetName() const override { return "Inference"; }
  int GetVersion() const override { return 0; }
  std::unique_ptr<Node> Create(const Value& value) override {
    return InferenceParser::Parse(value).value();
  }
};

REGISTER_MODULE(Node, InferenceCreator);

} // namespace mmdeploy::graph
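
REGISTER_MODULE here is the usual static self-registration idiom: a global object's constructor inserts a factory into a registry at program start-up, so the pipeline parser can build nodes by the name their creator reports. Below is a generic stand-in assuming a plain name-to-factory map; mmdeploy's actual Registry and macro expansion differ in detail, so treat every name in this sketch as hypothetical.

// Generic sketch of creator self-registration: a static object's constructor
// inserts a factory into a global map before main() runs, so nodes can be
// constructed by name when a config is parsed. All types here are
// hypothetical stand-ins, not mmdeploy's.
#include <functional>
#include <map>
#include <memory>
#include <string>

struct Node { virtual ~Node() = default; };

using Factory = std::function<std::unique_ptr<Node>()>;

std::map<std::string, Factory>& Registry() {
  static std::map<std::string, Factory> instance;  // constructed on first use
  return instance;
}

struct AutoRegister {
  AutoRegister(const std::string& name, Factory f) {
    Registry().emplace(name, std::move(f));
  }
};

struct InferenceNode : Node {};

// Roughly what a REGISTER_MODULE-style macro boils down to:
static AutoRegister kInferenceReg{
    "Inference", [] { return std::make_unique<InferenceNode>(); }};

int main() {
  auto node = Registry().at("Inference")();  // look up by name and construct
}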