mmdeploy/csrc/execution/schedulers/intrusive_queue.h
lzhangzz 46bfe0ac87
[Feature] New pipeline & executor for SDK (#497)
* executor prototype

* add split/when_all

* fix GCC build

* WIP let_value

* fix let_value

* WIP ensure_started

* ensure_started & start_detached

* fix let_value + when_all combo on MSVC 142

* fix static thread pool

* generic just, then, let_value, sync_wait

* minor

* generic split and when_all

* fully generic sender adapters

* when_all: workaround for GCC7

* support legacy spdlog

* fix memleak

* bulk

* static detector

* fix bulk & first pipeline

* bulk for static thread pools

* fix on MSVC

* WIP async batch submission

* WIP collation

* async batch

* fix detector

* fix async detector

* fix

* fix

* debug

* fix cuda allocator

* WIP type erased executor

* better type erasure

* simplify C API impl

* Expand & type erase TC

* deduction guide for type erased senders

* fix GCC build

* when_all for arrays of Value senders

* WIP pipeline v2

* WIP pipeline parser

* WIP timed batch operation

* add registry

* experiment

* fix pipeline

* naming

* fix mem-leak

* fix deferred batch operation

* WIP

* WIP configurable scheduler

* WIP configurable scheduler

* add comment

* parse scheduler config

* force link schedulers

* WIP pipeable sender

* WIP CPO

* ADL isolation and dismantle headers

* type erase single thread context

* fix MSVC build

* CPO

* replace decay_t with remove_cvref_t

* structure adjustment

* structure adjustment

* apply CPOs & C API rework

* refine C API

* detector async C API

* adjust detector async C API

* # Conflicts:
#	csrc/apis/c/detector.cpp

* fix when_all for type erased senders

* support void return for Then

* async detector

* fix some CPOs

* minor

* WIP rework capture mechanism for type erased types

* minor fix

* fix MSVC build

* move expand.h to execution

* make `Expand` pipeable

* fix type erased

* un-templatize `_TypeErasedOperation`

* re-work C API

* remove async_detector C API

* fix pipeline

* add flatten & unflatten

* fix flatten & unflatten

* add async OCR demo

* config executor for nodes & better executor API

* working async OCR example

* minor

* dynamic batch via scheduler

* dynamic batch on `Value`

* fix MSVC build

* type erase dynamic batch scheduler

* sender as Python Awaitable

* naming

* naming

* add docs

* minor

* merge tmp branch

* unify C APIs

* fix ocr

* unify APIs

* fix typo

* update async OCR demo

* add v3 API text recognizer

* fix v3 API

* fix lint

* add license info & reformat

* add demo async_ocr_v2

* revert files

* revert files

* resolve link issues

* fix scheduler linkage for shared libs

* fix license header

* add docs for `mmdeploy_executor_split`

* add missing `mmdeploy_executor_transfer_just` and `mmdeploy_executor_execute`

* make `TimedSingleThreadContext` header only

* fix lint

* simplify type-erased sender
2022-06-01 14:10:43 +08:00

/*
* Copyright (c) Facebook, Inc. and its affiliates.
* Copyright (c) NVIDIA
*
* Licensed under the Apache License Version 2.0 with LLVM Exceptions
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* https://llvm.org/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <cassert>
#include <tuple>
#include <utility>

namespace mmdeploy {

template <auto Next>
class intrusive_queue;

// Non-owning, allocation-free FIFO queue: `Next` is a pointer to the member
// of `Item` that serves as the intrusive "next" link, so items link
// themselves and the queue only tracks head and tail pointers.
template <class Item, Item* Item::*Next>
class intrusive_queue<Next> {
 public:
  intrusive_queue() noexcept = default;

  intrusive_queue(intrusive_queue&& other) noexcept
      : head_(std::exchange(other.head_, nullptr)),
        tail_(std::exchange(other.tail_, nullptr)) {}

  ~intrusive_queue() { assert(empty()); }

  // Reverses a raw singly linked list in place and adopts it, so that items
  // popped off a LIFO structure (e.g. a lock-free stack) can be consumed in
  // FIFO order.
  static intrusive_queue MakeReversed(Item* list) noexcept {
    Item* new_head = nullptr;
    Item* new_tail = list;
    while (list != nullptr) {
      Item* next = list->*Next;
      list->*Next = new_head;
      new_head = list;
      list = next;
    }
    intrusive_queue result;
    result.head_ = new_head;
    result.tail_ = new_tail;
    return result;
  }
  bool empty() const noexcept { return head_ == nullptr; }

  Item* pop_front() noexcept {
    assert(!empty());
    Item* item = std::exchange(head_, head_->*Next);
    if (head_ == nullptr) {
      tail_ = nullptr;
    }
    return item;
  }

  void push_front(Item* item) noexcept {
    assert(item != nullptr);
    item->*Next = head_;
    head_ = item;
    if (tail_ == nullptr) {
      tail_ = item;
    }
  }

  void push_back(Item* item) noexcept {
    assert(item != nullptr);
    item->*Next = nullptr;
    if (tail_ == nullptr) {
      head_ = item;
    } else {
      tail_->*Next = item;
    }
    tail_ = item;
  }

  void append(intrusive_queue other) noexcept {
    if (other.empty()) {
      return;
    }
    auto* other_head = std::exchange(other.head_, nullptr);
    if (empty()) {
      head_ = other_head;
    } else {
      tail_->*Next = other_head;
    }
    tail_ = std::exchange(other.tail_, nullptr);
  }

  void prepend(intrusive_queue other) noexcept {
    if (other.empty()) {
      return;
    }
    other.tail_->*Next = head_;
    head_ = other.head_;
    if (tail_ == nullptr) {
      tail_ = other.tail_;
    }
    other.tail_ = nullptr;
    other.head_ = nullptr;
  }

 private:
  Item* head_ = nullptr;
  Item* tail_ = nullptr;
};

}  // namespace mmdeploy
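
A minimal usage sketch: each item carries its own `next` pointer and the queue is instantiated on that pointer-to-member, so no allocation or copying ever happens. The `Task` type, its `id` payload, the include path, and the `main` driver below are illustrative assumptions, not part of this header; a scheduler's run loop would drain pending work in the same FIFO fashion.

#include <cstdio>

#include "execution/schedulers/intrusive_queue.h"  // hypothetical path; adjust to your include root

using mmdeploy::intrusive_queue;

// Illustrative item type: `next` is the intrusive link the queue threads
// through; `id` stands in for real payload.
struct Task {
  int id = 0;
  Task* next = nullptr;
};

int main() {
  intrusive_queue<&Task::next> queue;  // Item = Task deduced from &Task::next

  Task a{1}, b{2}, c{3};
  queue.push_back(&a);   // queue: a
  queue.push_back(&b);   // queue: a, b
  queue.push_front(&c);  // queue: c, a, b

  while (!queue.empty()) {
    std::printf("task %d\n", queue.pop_front()->id);  // prints 3, 1, 2
  }

  // MakeReversed adopts a raw LIFO-linked list, e.g. as popped wholesale from
  // a lock-free stack. Pushing a, b, c onto such a stack yields c -> b -> a;
  // reversing restores the original FIFO order.
  a.next = nullptr;
  b.next = &a;
  c.next = &b;  // raw list: c -> b -> a
  auto fifo = intrusive_queue<&Task::next>::MakeReversed(&c);
  while (!fifo.empty()) {
    std::printf("task %d\n", fifo.pop_front()->id);  // prints 1, 2, 3
  }
  return 0;  // both queues are empty, satisfying the destructor's assert
}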