mirror of
https://github.com/open-mmlab/mmdeploy.git
synced 2025-01-14 08:09:43 +08:00
* executor prototype * add split/when_all * fix GCC build * WIP let_value * fix let_value * WIP ensure_started * ensure_started & start_detached * fix let_value + when_all combo on MSVC 142 * fix static thread pool * generic just, then, let_value, sync_wait * minor * generic split and when_all * fully generic sender adapters * when_all: workaround for GCC7 * support legacy spdlog * fix memleak * bulk * static detector * fix bulk & first pipeline * bulk for static thread pools * fix on MSVC * WIP async batch submission * WIP collation * async batch * fix detector * fix async detector * fix * fix * debug * fix cuda allocator * WIP type erased executor * better type erasure * simplify C API impl * Expand & type erase TC * deduction guide for type erased senders * fix GCC build * when_all for arrays of Value senders * WIP pipeline v2 * WIP pipeline parser * WIP timed batch operation * add registry * experiment * fix pipeline * naming * fix mem-leak * fix deferred batch operation * WIP * WIP configurable scheduler * WIP configurable scheduler * add comment * parse scheduler config * force link schedulers * WIP pipeable sender * WIP CPO * ADL isolation and dismantle headers * type erase single thread context * fix MSVC build * CPO * replace decay_t with remove_cvref_t * structure adjustment * structure adjustment * apply CPOs & C API rework * refine C API * detector async C API * adjust detector async C API * # Conflicts: # csrc/apis/c/detector.cpp * fix when_all for type erased senders * support void return for Then * async detector * fix some CPOs * minor * WIP rework capture mechanism for type erased types * minor fix * fix MSVC build * move expand.h to execution * make `Expand` pipeable * fix type erased * un-templatize `_TypeErasedOperation` * re-work C API * remove async_detector C API * fix pipeline * add flatten & unflatten * fix flatten & unflatten * add aync OCR demo * config executor for nodes & better executor API * working async OCR example * minor * dynamic 
batch via scheduler * dynamic batch on `Value` * fix MSVC build * type erase dynamic batch scheduler * sender as Python Awaitable * naming * naming * add docs * minor * merge tmp branch * unify C APIs * fix ocr * unify APIs * fix typo * update async OCR demo * add v3 API text recognizer * fix v3 API * fix lint * add license info & reformat * add demo async_ocr_v2 * revert files * revert files * resolve link issues * fix scheduler linkage for shared libs * fix license header * add docs for `mmdeploy_executor_split` * add missing `mmdeploy_executor_transfer_just` and `mmdeploy_executor_execute` * make `TimedSingleThreadContext` header only * fix lint * simplify type-erased sender
207 lines
5.7 KiB
C++
207 lines
5.7 KiB
C++
// Copyright (c) OpenMMLab. All rights reserved.
|
|
|
|
#include "apis/c/executor.h"

#include <memory>

#include "common.h"
#include "common_internal.h"
#include "execution/when_all_value.h"
#include "executor_internal.h"
|
|
|
|
using namespace mmdeploy;
|
|
|
|
namespace {
|
|
|
|
// Looks up the scheduler creator registered under `type` and builds a
// type-erased scheduler handle from `config`.
// Returns nullptr if no creator is registered or construction throws.
mmdeploy_scheduler_t CreateScheduler(const char* type, const Value& config = Value()) {
  try {
    auto creator = Registry<SchedulerType>::Get().GetCreator(type);
    if (creator) {
      // heap-allocated handle; released by mmdeploy_scheduler_destroy
      return Cast(new SchedulerType(creator->Create(config)));
    }
    MMDEPLOY_ERROR("creator for {} not found.", type);
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("failed to create {}, error: {}", type, e.what());
  }
  return nullptr;
}
|
|
|
|
} // namespace
|
|
|
|
// Copies a sender handle so the original and the copy can each be consumed
// once. Returns nullptr on null input or if the copy throws.
mmdeploy_sender_t mmdeploy_sender_copy(mmdeploy_sender_t input) {
  if (!input) {
    return nullptr;
  }
  // Guard keeps this consistent with the other factories in this file: a
  // throwing copy (e.g. std::bad_alloc) yields nullptr instead of letting an
  // exception escape across the C ABI
  return Guard([&] { return Take(SenderType(*Cast(input))); });
}
|
|
|
|
// Releases a sender handle; a null handle is a harmless no-op.
// Always returns 0.
int mmdeploy_sender_destroy(mmdeploy_sender_t sender) {
  auto* ptr = Cast(sender);
  delete ptr;
  return 0;
}
|
|
|
|
// Returns a scheduler that runs submitted work inline on the calling thread.
mmdeploy_scheduler_t mmdeploy_executor_inline() {
  return CreateScheduler("Inline");
}
|
|
|
|
// Returns a handle to the process-wide shared thread pool.
// The pool is created once on first use and lives for the process lifetime;
// each call hands out an independent handle to the same underlying pool.
// Returns nullptr if the pool could not be created.
mmdeploy_scheduler_t mmdeploy_executor_system_pool() {
  // hold the original handle statically instead of dereferencing and copying
  // it (the previous form leaked the heap object returned by CreateScheduler
  // and dereferenced nullptr on creation failure)
  static mmdeploy_scheduler_t scheduler = CreateScheduler("ThreadPool");
  if (!scheduler) {
    return nullptr;
  }
  // return a copy of the handle to the thread pool
  return Cast(new SchedulerType(*Cast(scheduler)));
}
|
|
|
|
// Creates a dedicated thread-pool scheduler with `num_threads` workers.
// Returns nullptr on failure.
mmdeploy_scheduler_t mmdeploy_executor_create_thread_pool(int num_threads) {
  const Value config{{"num_threads", num_threads}};
  return CreateScheduler("ThreadPool", config);
}
|
|
|
|
// Creates a scheduler backed by a single dedicated worker thread.
mmdeploy_scheduler_t mmdeploy_executor_create_thread() {
  return CreateScheduler("SingleThread");
}
|
|
|
|
// Wraps `scheduler` in a dynamic-batching scheduler that groups submitted
// work into batches of at most `max_batch_size`, flushing after `timeout`
// (units defined by the DynamicBatch scheduler implementation).
// Returns nullptr on null input or creation failure.
mmdeploy_scheduler_t mmdeploy_executor_dynamic_batch(mmdeploy_scheduler_t scheduler,
                                                     int max_batch_size, int timeout) {
  if (!scheduler) {
    return nullptr;
  }
  const Value config{{"scheduler", *Cast(scheduler)},
                     {"max_batch_size", max_batch_size},
                     {"timeout", timeout}};
  return CreateScheduler("DynamicBatch", config);
}
|
|
|
|
// Releases a scheduler handle; a null handle is a harmless no-op.
// Always returns 0.
int mmdeploy_scheduler_destroy(mmdeploy_scheduler_t scheduler) {
  auto* ptr = Cast(scheduler);
  delete ptr;
  return 0;
}
|
|
|
|
// Creates a sender that immediately completes with a copy of `value`.
// A null `value` yields a sender of an empty Value.
mmdeploy_sender_t mmdeploy_executor_just(mmdeploy_value_t value) {
  if (!value) {
    // no payload supplied: complete with an empty Value
    return Take(Just(Value()));
  }
  return Guard([&] { return Take(Just(*Cast(value))); });
}
|
|
|
|
// Creates a sender that completes on `scheduler` with an empty Value.
// Returns nullptr on null input.
mmdeploy_sender_t mmdeploy_executor_schedule(mmdeploy_scheduler_t scheduler) {
  if (!scheduler) {
    return nullptr;
  }
  return Guard([&] {
    // adapt the scheduler's void sender to the Value-based C API
    auto sender = Then(Schedule(*Cast(scheduler)), [] { return Value(); });
    return Take(std::move(sender));
  });
}
|
|
|
|
// Creates a sender that completes on `scheduler` with a copy of `value`.
// Returns nullptr if either argument is null.
mmdeploy_sender_t mmdeploy_executor_transfer_just(mmdeploy_scheduler_t scheduler,
                                                  mmdeploy_value_t value) {
  if (scheduler && value) {
    return Guard([&] { return Take(TransferJust(*Cast(scheduler), *Cast(value))); });
  }
  return nullptr;
}
|
|
|
|
// Transfers the continuation of `input` onto `scheduler`.
// Consumes `input`; returns nullptr if either argument is null.
mmdeploy_sender_t mmdeploy_executor_transfer(mmdeploy_sender_t input,
                                             mmdeploy_scheduler_t scheduler) {
  if (input && scheduler) {
    return Guard([&] { return Take(Transfer(Take(input), *Cast(scheduler))); });
  }
  return nullptr;
}
|
|
|
|
// Runs `input` on `scheduler` (starts the work there, as opposed to
// `transfer`, which only moves the continuation).
// Consumes `input`; returns nullptr if either argument is null.
mmdeploy_sender_t mmdeploy_executor_on(mmdeploy_scheduler_t scheduler, mmdeploy_sender_t input) {
  if (scheduler && input) {
    return Guard([&] { return Take(On(*Cast(scheduler), Take(input))); });
  }
  return nullptr;
}
|
|
|
|
// Chains the user callback `fn` onto `input`: when `input` completes, `fn` is
// invoked with the produced value (handle ownership passes to `fn`) and must
// return a new value handle, which becomes the result of the returned sender.
// Consumes `input`; returns nullptr if `input` or `fn` is null.
// NOTE(review): `fn` returning a null handle is not checked and would crash —
// confirm the callback contract requires a non-null result.
mmdeploy_sender_t mmdeploy_executor_then(mmdeploy_sender_t input, mmdeploy_then_fn_t fn,
                                         void* context) {
  if (!input || !fn) {
    return nullptr;
  }
  return Guard([&] {
    return Take(Then(Take(input), [fn, context](Value args) {
      // take ownership of the callback's heap result so it is released even
      // if the move construction below throws (was a raw `delete`)
      std::unique_ptr<Value> out(Cast(fn(Take(std::move(args)), context)));
      return Value(std::move(*out));
    }));
  });
}
|
|
|
|
// Chains the user callback `fn` onto `input`: when `input` completes, `fn`
// receives a borrowed handle to the produced value and must return a new
// sender handle, whose result becomes the result of the returned sender.
// Consumes `input`; returns nullptr if `input` or `fn` is null.
// NOTE(review): `fn` returning a null handle is not checked and would crash —
// confirm the callback contract requires a non-null result.
mmdeploy_sender_t mmdeploy_executor_let_value(mmdeploy_sender_t input, mmdeploy_let_value_fn_t fn,
                                              void* context) {
  if (!input || !fn) {
    return nullptr;
  }
  return Guard([&] {
    return Take(LetValue(Take(input), [fn, context](Value& args) {
      // take ownership of the callback's heap result so it is released even
      // if the move construction below throws (was a raw `delete`)
      std::unique_ptr<SenderType> out(Cast(fn(Cast(&args), context)));
      return SenderType(std::move(*out));
    }));
  });
}
|
|
|
|
// Converts `input` into a multi-shot sender that can be copied (via
// mmdeploy_sender_copy) and consumed multiple times.
// Consumes `input`; returns nullptr on null input.
mmdeploy_sender_t mmdeploy_executor_split(mmdeploy_sender_t input) {
  if (input) {
    return Guard([&] { return Take(Split(Take(input))); });
  }
  return nullptr;
}
|
|
|
|
// Combines `n` senders into a single sender that completes with an array
// holding every result. Consumes the input handles on success.
// Returns nullptr — without consuming any input — if `inputs` is null, `n`
// is negative, or any element is null (previously a null element crashed
// after part of the array had already been consumed).
mmdeploy_sender_t mmdeploy_executor_when_all(mmdeploy_sender_t inputs[], int32_t n) {
  if (!inputs || n < 0) {
    return nullptr;
  }
  // validate up front so a bad element doesn't leave the array half-consumed
  for (int32_t i = 0; i < n; ++i) {
    if (!inputs[i]) {
      return nullptr;
    }
  }
  return Guard([&] {
    std::vector<SenderType> senders;
    senders.reserve(n);
    for (int32_t i = 0; i < n; ++i) {
      senders.emplace_back(Take(inputs[i]));
    }
    // repackage the array of results as a single Value
    return Take(
        Then(WhenAll(std::move(senders)), [](Value::Array&& v) { return Value(std::move(v)); }));
  });
}
|
|
|
|
// Eagerly starts `input` and returns a sender for its eventual result.
// Consumes `input`; returns nullptr on null input.
mmdeploy_sender_t mmdeploy_executor_ensure_started(mmdeploy_sender_t input) {
  if (input) {
    return Guard([&] { return Take(EnsureStarted(Take(input))); });
  }
  return nullptr;
}
|
|
|
|
// Starts `input` without keeping a handle to its result (fire-and-forget).
// Consumes `input`. Returns 0 on success, MM_E_INVALID_ARG on null input,
// MM_E_FAIL if submission throws.
int mmdeploy_executor_start_detached(mmdeploy_sender_t input) {
  if (!input) {
    return MM_E_INVALID_ARG;
  }
  try {
    StartDetached(Take(input));
  } catch (...) {
    // submission may throw; map any exception to a generic failure code
    // rather than letting it cross the C ABI
    return MM_E_FAIL;
  }
  return 0;
}
|
|
|
|
// Blocks the calling thread until `input` completes and returns its result
// as a new value handle. Consumes `input`; returns nullptr on null input or
// if waiting throws.
mmdeploy_value_t mmdeploy_executor_sync_wait(mmdeploy_sender_t input) {
  if (!input) {
    return nullptr;
  }
  return Guard([&] {
    auto result = SyncWait(Take(input));
    return Take(std::get<Value>(std::move(result)));
  });
}
|
|
|
|
// Status-code variant of mmdeploy_executor_sync_wait: blocks until `sender`
// completes and stores the result handle in `*value` when `value` is
// non-null (otherwise the result is destroyed immediately).
// Returns MM_SUCCESS, MM_E_INVALID_ARG on null sender, or MM_E_FAIL.
int mmdeploy_executor_sync_wait_v2(mmdeploy_sender_t sender, mmdeploy_value_t* value) {
  if (!sender) {
    return MM_E_INVALID_ARG;
  }
  auto result = mmdeploy_executor_sync_wait(sender);
  if (!result) {
    return MM_E_FAIL;
  }
  if (!value) {
    // caller doesn't want the result: release it instead of leaking
    mmdeploy_value_destroy(result);
  } else {
    *value = result;
  }
  return MM_SUCCESS;
}
|
|
|
|
// Schedules the plain callback `fn(context)` for execution on `scheduler`.
// Null `scheduler` or `fn` is a no-op (previously this was the only entry
// point in this API without a null check and would crash).
void mmdeploy_executor_execute(mmdeploy_scheduler_t scheduler, void (*fn)(void*), void* context) {
  if (!scheduler || !fn) {
    return;
  }
  Execute(*Cast(scheduler), [fn, context] { fn(context); });
}
|