// Copyright (c) OpenMMLab. All rights reserved.

#ifndef MMDEPLOY_SRC_NET_PPL_PPL_NET_H_
#define MMDEPLOY_SRC_NET_PPL_PPL_NET_H_

#include "core/mpl/span.h"
#include "core/net.h"
#include "ppl/nn/engines/engine.h"
#include "ppl/nn/runtime/runtime.h"

namespace mmdeploy {

using PPLTensor = ppl::nn::Tensor;

// Net implementation backed by the PPL.nn inference runtime.
class PPLNet : public Net {
 public:
  ~PPLNet() override;
  Result<void> Init(const Value& args) override;
  Result<void> Deinit() override;
  Result<void> Reshape(Span<TensorShape> input_shapes) override;
  Result<Span<Tensor>> GetInputTensors() override;
  Result<Span<Tensor>> GetOutputTensors() override;
  Result<void> Forward() override;
  Result<void> ForwardAsync(Event* event) override;

  // Predicts output shapes for the requested input shapes, using the input
  // and output shapes observed in the previous run as a reference.
  static Result<std::vector<TensorShape>> InferOutputShapes(Span<TensorShape> input_shapes,
                                                            Span<TensorShape> prev_in_shapes,
                                                            Span<TensorShape> prev_out_shapes);

 private:
  // Wraps a PPL.nn tensor in an mmdeploy Tensor residing on `device`.
  static Tensor CreateInternalTensor(ppl::nn::Tensor* src, Device device);

  static Result<int> GetBatchSize(Span<TensorShape> shapes);
  static std::vector<TensorShape> GetShapes(Span<Tensor> tensors);

  Device device_;
  Stream stream_;
  std::vector<std::unique_ptr<ppl::nn::Engine>> engines_;
  std::vector<Tensor> inputs_external_;      // tensors exposed to the caller
  std::vector<Tensor> outputs_external_;
  std::vector<PPLTensor*> inputs_internal_;  // tensors owned by the runtime
  std::vector<PPLTensor*> outputs_internal_;
  std::unique_ptr<ppl::nn::Runtime> runtime_;

  bool can_infer_output_shapes_{false};

  static constexpr const auto kHost = Device(0);
};

}  // namespace mmdeploy

#endif  // MMDEPLOY_SRC_NET_PPL_PPL_NET_H_
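
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header). It shows
// how a caller might drive the Net interface declared above. The contents of
// `args`, the direct construction of PPLNet, and the error handling are
// assumptions: in MMDeploy, nets are typically created through a registry
// rather than instantiated directly, and `args` would carry the model and
// device configuration.
//
//   mmdeploy::PPLNet net;
//   mmdeploy::Value args;  // assumed to hold model path, device id, etc.
//   if (auto r = net.Init(args); !r) {
//     // initialization failed; inspect the error in `r`
//   }
//   auto inputs = net.GetInputTensors().value();
//   // ... copy input data into `inputs` here ...
//   net.Forward().value();
//   auto outputs = net.GetOutputTensors().value();
//   // ... read results from `outputs` ...
//   net.Deinit().value();
// ---------------------------------------------------------------------------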