mmdeploy/csrc/execution/schedulers/timed_single_thread_context.h
lzhangzz · 46bfe0ac87 · [Feature] New pipeline & executor for SDK (#497) · 2022-06-01 14:10:43 +08:00

// Copyright (c) OpenMMLab. All rights reserved.
// Modified from
// https://github.com/facebookexperimental/libunifex/blob/main/include/unifex/timed_single_thread_context.hpp

#pragma once

#include <cassert>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

#include "execution/execution.h"

namespace mmdeploy {

class TimedSingleThreadContext;

namespace _timed_single_thread_context {

using Clock = std::chrono::steady_clock;
using TimePoint = typename Clock::time_point;

// Intrusive node of the context's timer queue; `execute_` type-erases the
// concrete operation state that owns the node.
struct TaskBase {
  using ExecuteFn = void(TaskBase*) noexcept;

  explicit TaskBase(TimedSingleThreadContext& context, ExecuteFn* execute) noexcept
      : context_(&context), execute_(execute) {}

  TimedSingleThreadContext* const context_;
  TaskBase* next_{nullptr};
  TaskBase** prev_next_ptr_{nullptr};
  ExecuteFn* execute_;
  TimePoint due_time_;

  void Execute() noexcept { execute_(this); }
};

class Scheduler;

namespace __schedule_after {

template <typename Duration, typename Receiver>
struct _Operation {
  struct type;
};
template <typename Duration, typename Receiver>
using operation_t = typename _Operation<Duration, remove_cvref_t<Receiver>>::type;

template <typename Duration>
struct _Sender {
  struct type;
};
template <typename Duration>
using sender_t = typename _Sender<Duration>::type;

}  // namespace __schedule_after

// Handle returned by TimedSingleThreadContext::GetScheduler(). The hidden
// friend tag_invoke(schedule_t, ...) implements the schedule CPO, and
// ScheduleAfter() produces a delayed sender.
class Scheduler {
  friend TimedSingleThreadContext;

  explicit Scheduler(TimedSingleThreadContext& context) noexcept : context_(&context) {}

  friend bool operator==(Scheduler a, Scheduler b) noexcept { return a.context_ == b.context_; }
  friend bool operator!=(Scheduler a, Scheduler b) noexcept { return a.context_ != b.context_; }

  TimedSingleThreadContext* context_;

  // Sender that completes on the context's thread after `delay`.
  template <typename Rep, typename Ratio>
  friend auto ScheduleAfter(const Scheduler& self, std::chrono::duration<Rep, Ratio> delay) noexcept
      -> __schedule_after::sender_t<std::chrono::duration<Rep, Ratio>> {
    return {self.context_, delay};
  }

  // Plain schedule is a zero-delay ScheduleAfter.
  template <typename Duration = std::chrono::microseconds>
  friend __schedule_after::sender_t<Duration> tag_invoke(schedule_t,
                                                         const Scheduler& self) noexcept {
    return {self.context_, Duration::zero()};
  }
};

}  // namespace _timed_single_thread_context

// Owns a single worker thread that executes timer tasks in due-time order.
class MMDEPLOY_API TimedSingleThreadContext {
  using Clock = _timed_single_thread_context::Clock;
  using Scheduler = _timed_single_thread_context::Scheduler;
  using TaskBase = _timed_single_thread_context::TaskBase;

  template <typename Duration, typename Receiver>
  friend struct _timed_single_thread_context::__schedule_after::_Operation;
  friend Scheduler;

  // Insert `task` into the singly-linked queue, keeping it sorted by due time;
  // the worker is woken only when the earliest due time changes.
  void Enqueue(TaskBase* task) noexcept {
    bool need_notify = false;
    {
      std::lock_guard lock{mutex_};
      if (head_ == nullptr || task->due_time_ < head_->due_time_) {
        task->next_ = head_;
        head_ = task;
        need_notify = true;
      } else {
        auto* queued_task = head_;
        // find insert pos
        while (queued_task->next_ != nullptr && queued_task->next_->due_time_ <= task->due_time_) {
          queued_task = queued_task->next_;
        }
        task->next_ = queued_task->next_;
        queued_task->next_ = task;
      }
    }
    if (need_notify) {
      cv_.notify_one();
    }
  }

  // Worker loop: run tasks whose due time has passed; otherwise sleep until
  // the next due time or until Enqueue()/the destructor signals the cv.
  void Run() {
    std::unique_lock lock{mutex_};
    while (!stop_) {
      if (head_ != nullptr) {
        auto now = Clock::now();
        auto next_due_time = head_->due_time_;
        if (next_due_time <= now) {
          // dequeue
          auto* task = head_;
          head_ = task->next_;
          // execute without holding the lock
          lock.unlock();
          task->Execute();
          lock.lock();
        } else {
          cv_.wait_until(lock, next_due_time);
        }
      } else {
        cv_.wait(lock);
      }
    }
  }

  std::mutex mutex_;
  std::condition_variable cv_;

  TaskBase* head_{nullptr};
  bool stop_{false};

  std::thread thread_;

 public:
  TimedSingleThreadContext() : thread_([this] { this->Run(); }) {}

  ~TimedSingleThreadContext() {
    {
      std::lock_guard lock{mutex_};
      stop_ = true;
      cv_.notify_one();
    }
    thread_.join();
    assert(head_ == nullptr);
  }

  Scheduler GetScheduler() noexcept { return Scheduler{*this}; }

  std::thread::id GetThreadId() const noexcept { return thread_.get_id(); }
};

namespace _timed_single_thread_context::__schedule_after {

// Operation state for the delayed sender: a TaskBase whose execution calls
// SetValue on the stored receiver.
template <typename Duration, typename Receiver>
struct _Operation<Duration, Receiver>::type : TaskBase {
  Duration duration_;
  Receiver receiver_;

  template <typename Receiver2>
  type(TimedSingleThreadContext& context, Duration duration, Receiver2&& receiver)
      : TaskBase(context, &type::ExecuteImpl),
        duration_(duration),
        receiver_((Receiver2 &&) receiver) {}

  static void ExecuteImpl(TaskBase* p) noexcept {
    auto& self = *static_cast<type*>(p);
    SetValue((Receiver &&) self.receiver_);
  }

  void Enqueue() { context_->Enqueue(this); }

  friend void tag_invoke(start_t, type& op_state) noexcept {
    op_state.due_time_ = Clock::now() + op_state.duration_;
    op_state.Enqueue();
  }
};

// Sender returned by ScheduleAfter / the schedule CPO; completes with no values.
template <typename Duration>
struct _Sender<Duration>::type {
  using value_types = std::tuple<>;

  TimedSingleThreadContext* context_;
  Duration duration_;

  template <typename Self, typename Receiver, _decays_to<Self, type, int> = 0>
  friend operation_t<Duration, Receiver> tag_invoke(connect_t, Self&& self, Receiver&& receiver) {
    return {*self.context_, self.duration_, (Receiver &&) receiver};
  }
};

}  // namespace _timed_single_thread_context::__schedule_after

}  // namespace mmdeploy
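
A minimal usage sketch (not part of the header), showing how the context, its scheduler and ScheduleAfter compose with the rest of the execution library. The Then and SyncWait adapters and the include paths are assumptions based on "execution/execution.h" and the surrounding codebase, not something this file defines:

#include <chrono>
#include <cstdio>

#include "execution/execution.h"
#include "execution/schedulers/timed_single_thread_context.h"

int main() {
  mmdeploy::TimedSingleThreadContext context;  // spawns the timer thread
  auto scheduler = context.GetScheduler();

  // Sender that completes on the timer thread roughly 50 ms from now;
  // ScheduleAfter is found via ADL on the Scheduler handle.
  auto delayed = ScheduleAfter(scheduler, std::chrono::milliseconds(50));

  // Attach a continuation (assumed Then adapter) and block the caller until
  // it has run (assumed SyncWait adapter).
  mmdeploy::SyncWait(mmdeploy::Then(std::move(delayed), [] { std::printf("timer fired\n"); }));

  return 0;  // ~TimedSingleThreadContext joins the worker thread
}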