// ---------------------------------------------------------------------------
// Repository-mirror metadata captured alongside this file, preserved here as
// comments so the translation unit remains valid C++.
//
// mirror of https://github.com/open-mmlab/mmdeploy.git
// synced 2025-01-14 08:09:43 +08:00
// file stats: 143 lines, 4.9 KiB, C++
//
// Squashed commit log:
// * executor prototype * add split/when_all * fix GCC build * WIP let_value * fix let_value * WIP ensure_started * ensure_started & start_detached * fix let_value + when_all combo on MSVC 142 * fix static thread pool * generic just, then, let_value, sync_wait * minor * generic split and when_all * fully generic sender adapters * when_all: workaround for GCC7 * support legacy spdlog * fix memleak * bulk * static detector * fix bulk & first pipeline * bulk for static thread pools * fix on MSVC * WIP async batch submission * WIP collation * async batch * fix detector * fix async detector * fix * fix * debug * fix cuda allocator * WIP type erased executor * better type erasure * simplify C API impl * Expand & type erase TC * deduction guide for type erased senders * fix GCC build * when_all for arrays of Value senders * WIP pipeline v2 * WIP pipeline parser * WIP timed batch operation * add registry * experiment * fix pipeline * naming * fix mem-leak * fix deferred batch operation * WIP * WIP configurable scheduler * WIP configurable scheduler * add comment * parse scheduler config * force link schedulers * WIP pipeable sender * WIP CPO * ADL isolation and dismantle headers * type erase single thread context * fix MSVC build * CPO * replace decay_t with remove_cvref_t * structure adjustment * structure adjustment * apply CPOs & C API rework * refine C API * detector async C API * adjust detector async C API * # Conflicts: # csrc/apis/c/detector.cpp * fix when_all for type erased senders * support void return for Then * async detector * fix some CPOs * minor * WIP rework capture mechanism for type erased types * minor fix * fix MSVC build * move expand.h to execution * make `Expand` pipeable * fix type erased * un-templatize `_TypeErasedOperation` * re-work C API * remove async_detector C API * fix pipeline * add flatten & unflatten * fix flatten & unflatten * add aync OCR demo * config executor for nodes & better executor API * working async OCR example * minor * dynamic
// batch via scheduler * dynamic batch on `Value` * fix MSVC build * type erase dynamic batch scheduler * sender as Python Awaitable * naming * naming * add docs * minor * merge tmp branch * unify C APIs * fix ocr * unify APIs * fix typo * update async OCR demo * add v3 API text recognizer * fix v3 API * fix lint * add license info & reformat * add demo async_ocr_v2 * revert files * revert files * resolve link issues * fix scheduler linkage for shared libs * fix license header * add docs for `mmdeploy_executor_split` * add missing `mmdeploy_executor_transfer_just` and `mmdeploy_executor_execute` * make `TimedSingleThreadContext` header only * fix lint * simplify type-erased sender
// ---------------------------------------------------------------------------
// Copyright (c) OpenMMLab. All rights reserved.

#include "execution/schedulers/dynamic_batch_scheduler.h"
#include "execution/schedulers/inlined_scheduler.h"
#include "execution/schedulers/registry.h"
#include "execution/schedulers/single_thread_context.h"
#include "execution/schedulers/static_thread_pool.h"
#include "execution/schedulers/timed_single_thread_context.h"

namespace mmdeploy {

// All registered schedulers are type-erased over the pipeline's `Value` payload.
using Scheduler = TypeErasedScheduler<Value>;

class InlineSchedulerCreator : public Creator<Scheduler> {
|
|
public:
|
|
const char* GetName() const override { return "Inline"; }
|
|
int GetVersion() const override { return 0; }
|
|
ReturnType Create(const Value&) override { return ReturnType{InlineScheduler{}}; }
|
|
};
REGISTER_MODULE(Scheduler, InlineSchedulerCreator);
namespace {
|
|
|
|
// Create type-erased scheduler by calling Context::GetScheduler and then move the context into the
|
|
// deleter of the impl ptr of the type-erased scheduler
|
|
template <class Context>
|
|
Scheduler CreateFromContext(std::unique_ptr<Context> context) {
|
|
using SchedType = decltype(context->GetScheduler());
|
|
using EraseType = _type_erased::TypeErasedSchedulerImpl<SchedType, Value>;
|
|
auto sched = new EraseType(context->GetScheduler());
|
|
return Scheduler{std::shared_ptr<Scheduler::Impl>(
|
|
sched, [context = std::move(context)](EraseType* p) { delete p; })};
|
|
}
|
|
|
|
} // namespace
class SingleThreadSchedCreator : public Creator<Scheduler> {
|
|
public:
|
|
const char* GetName() const override { return "SingleThread"; }
|
|
int GetVersion() const override { return 0; }
|
|
ReturnType Create(const Value&) override {
|
|
return CreateFromContext(std::make_unique<_single_thread_context::SingleThreadContext>());
|
|
}
|
|
};
REGISTER_MODULE(Scheduler, SingleThreadSchedCreator);
class StaticThreadPoolSchedCreator : public Creator<Scheduler> {
|
|
public:
|
|
const char* GetName() const override { return "ThreadPool"; }
|
|
int GetVersion() const override { return 0; }
|
|
ReturnType Create(const Value& cfg) override {
|
|
auto num_threads = -1;
|
|
if (cfg.is_object() && cfg.contains("num_threads")) {
|
|
num_threads = cfg["num_threads"].get<int>();
|
|
}
|
|
if (num_threads >= 1) {
|
|
return CreateFromContext(
|
|
std::make_unique<__static_thread_pool::StaticThreadPool>(num_threads));
|
|
} else {
|
|
return CreateFromContext(std::make_unique<__static_thread_pool::StaticThreadPool>());
|
|
}
|
|
}
|
|
};
REGISTER_MODULE(Scheduler, StaticThreadPoolSchedCreator);
struct ValueAssembler {
|
|
using range_t = std::pair<size_t, size_t>;
|
|
|
|
static size_t get_size(const Value& x) { return x.empty() ? 0 : x.front().size(); }
|
|
|
|
template <typename ValueType>
|
|
static void input(std::tuple<ValueType> _src, range_t src_range, std::tuple<Value>& _dst,
|
|
range_t dst_range, size_t batch_size) {
|
|
auto& [src] = _src;
|
|
auto& [dst] = _dst;
|
|
if (dst.empty()) {
|
|
dst = std::move(src);
|
|
for (auto& x : dst) {
|
|
x.array().reserve(batch_size);
|
|
}
|
|
return;
|
|
}
|
|
auto& u = src.array();
|
|
auto& v = dst.array();
|
|
assert(u.size() == v.size());
|
|
assert(dst_range.first = v.front().size());
|
|
for (size_t k = 0; k < src.size(); ++k) {
|
|
auto& x = u[k].array();
|
|
auto& y = v[k].array();
|
|
std::copy(std::begin(x) + src_range.first, std::begin(x) + src_range.first + src_range.second,
|
|
std::back_inserter(y));
|
|
}
|
|
}
|
|
|
|
static void output(Value& src, range_t src_range, Value& dst, range_t dst_range,
|
|
size_t batch_size) {
|
|
if (dst.empty()) {
|
|
dst = Value::Array(src.size(), Value::Array(batch_size));
|
|
}
|
|
auto& u = src.array();
|
|
auto& v = dst.array();
|
|
assert(u.size() == v.size());
|
|
for (size_t k = 0; k < src.size(); ++k) {
|
|
auto& x = u[k].array();
|
|
auto& y = v[k].array();
|
|
std::move(std::begin(x) + src_range.first, std::begin(x) + src_range.first + src_range.second,
|
|
std::begin(y) + dst_range.first);
|
|
}
|
|
}
|
|
};
TimedSingleThreadContext& gTimedSingleThreadContext() {
|
|
static TimedSingleThreadContext context{};
|
|
return context;
|
|
}
class DynamicBatchSchedCreator : public Creator<Scheduler> {
|
|
public:
|
|
const char* GetName() const override { return "DynamicBatch"; }
|
|
int GetVersion() const override { return 0; }
|
|
ReturnType Create(const Value& cfg) override {
|
|
using SchedulerType =
|
|
DynamicBatchScheduler<InlineScheduler, TypeErasedScheduler<Value>, ValueAssembler>;
|
|
auto scheduler = cfg["scheduler"].get<TypeErasedScheduler<Value>>();
|
|
auto max_batch_size = cfg["max_batch_size"].get<int>();
|
|
|
|
TimedSingleThreadContext* timer{};
|
|
auto timeout = cfg["timeout"].get<int>();
|
|
if (timeout >= 0) {
|
|
timer = &gTimedSingleThreadContext();
|
|
}
|
|
return ReturnType{SchedulerType{inline_scheduler, std::move(scheduler), timer,
|
|
(size_t)max_batch_size, std::chrono::microseconds(timeout)}};
|
|
}
|
|
};
REGISTER_MODULE(Scheduler, DynamicBatchSchedCreator);
} // namespace mmdeploy