# mmdeploy/service/snpe/client/inference_pb2_grpc.py
# (210 lines, 7.8 KiB, Python)
# Copyright (c) OpenMMLab. All rights reserved.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import inference_pb2 as inference__pb2
class InferenceStub(object):
    """Client-side stub for the mmdeploy.Inference service."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One row per unary-unary RPC: (method name, request message
        # class, response message class).
        method_table = (
            ('Echo', inference__pb2.Empty, inference__pb2.Reply),
            ('Init', inference__pb2.Model, inference__pb2.Reply),
            ('OutputNames', inference__pb2.Empty, inference__pb2.Names),
            ('Inference', inference__pb2.TensorList, inference__pb2.Reply),
            ('Destroy', inference__pb2.Empty, inference__pb2.Reply),
        )
        # Bind each RPC as an instance attribute (self.Echo, self.Init,
        # ...) so callers invoke them exactly as on the generated stub.
        for rpc_name, request_cls, response_cls in method_table:
            callable_ = channel.unary_unary(
                '/mmdeploy.Inference/' + rpc_name,
                request_serializer=request_cls.SerializeToString,
                response_deserializer=response_cls.FromString,
            )
            setattr(self, rpc_name, callable_)
class InferenceServicer(object):
    """Server-side base class for the mmdeploy.Inference service.

    Every RPC is left unimplemented; subclasses override the methods
    they actually serve.
    """

    def _unimplemented(self, context):
        """Flag the call as UNIMPLEMENTED on *context* and raise."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Echo(self, request, context):
        """Missing associated documentation comment in .proto file."""
        self._unimplemented(context)

    def Init(self, request, context):
        """Init Model with model file."""
        self._unimplemented(context)

    def OutputNames(self, request, context):
        """Get output names."""
        self._unimplemented(context)

    def Inference(self, request, context):
        """Inference with inputs."""
        self._unimplemented(context)

    def Destroy(self, request, context):
        """Destroy handle."""
        self._unimplemented(context)
def add_InferenceServicer_to_server(servicer, server):
    """Register an InferenceServicer implementation with a grpc server.

    Args:
        servicer: An object implementing the InferenceServicer methods.
        server: A grpc server to attach the generic handler to.
    """
    # One row per RPC: (name, behavior, request message class,
    # response message class).
    method_specs = (
        ('Echo', servicer.Echo, inference__pb2.Empty, inference__pb2.Reply),
        ('Init', servicer.Init, inference__pb2.Model, inference__pb2.Reply),
        ('OutputNames', servicer.OutputNames, inference__pb2.Empty,
         inference__pb2.Names),
        ('Inference', servicer.Inference, inference__pb2.TensorList,
         inference__pb2.Reply),
        ('Destroy', servicer.Destroy, inference__pb2.Empty,
         inference__pb2.Reply),
    )
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            behavior,
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, behavior, request_cls, response_cls in method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'mmdeploy.Inference', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler, ))
# This class is part of an EXPERIMENTAL API.
class Inference(object):
    """Channel-less invocation helpers for the mmdeploy.Inference service."""

    @staticmethod
    def _invoke(method, request_serializer, response_deserializer, request,
                target, options, channel_credentials, call_credentials,
                insecure, compression, wait_for_ready, timeout, metadata):
        """Issue a unary-unary call to /mmdeploy.Inference/<method>."""
        return grpc.experimental.unary_unary(
            request, target, '/mmdeploy.Inference/' + method,
            request_serializer, response_deserializer, options,
            channel_credentials, insecure, call_credentials, compression,
            wait_for_ready, timeout, metadata)

    @staticmethod
    def Echo(request,
             target,
             options=(),
             channel_credentials=None,
             call_credentials=None,
             insecure=False,
             compression=None,
             wait_for_ready=None,
             timeout=None,
             metadata=None):
        """Send an Empty request to Echo; deserializes a Reply."""
        return Inference._invoke(
            'Echo', inference__pb2.Empty.SerializeToString,
            inference__pb2.Reply.FromString, request, target, options,
            channel_credentials, call_credentials, insecure, compression,
            wait_for_ready, timeout, metadata)

    @staticmethod
    def Init(request,
             target,
             options=(),
             channel_credentials=None,
             call_credentials=None,
             insecure=False,
             compression=None,
             wait_for_ready=None,
             timeout=None,
             metadata=None):
        """Send a Model request to Init; deserializes a Reply."""
        return Inference._invoke(
            'Init', inference__pb2.Model.SerializeToString,
            inference__pb2.Reply.FromString, request, target, options,
            channel_credentials, call_credentials, insecure, compression,
            wait_for_ready, timeout, metadata)

    @staticmethod
    def OutputNames(request,
                    target,
                    options=(),
                    channel_credentials=None,
                    call_credentials=None,
                    insecure=False,
                    compression=None,
                    wait_for_ready=None,
                    timeout=None,
                    metadata=None):
        """Send an Empty request to OutputNames; deserializes Names."""
        return Inference._invoke(
            'OutputNames', inference__pb2.Empty.SerializeToString,
            inference__pb2.Names.FromString, request, target, options,
            channel_credentials, call_credentials, insecure, compression,
            wait_for_ready, timeout, metadata)

    @staticmethod
    def Inference(request,
                  target,
                  options=(),
                  channel_credentials=None,
                  call_credentials=None,
                  insecure=False,
                  compression=None,
                  wait_for_ready=None,
                  timeout=None,
                  metadata=None):
        """Send a TensorList request to Inference; deserializes a Reply."""
        return Inference._invoke(
            'Inference', inference__pb2.TensorList.SerializeToString,
            inference__pb2.Reply.FromString, request, target, options,
            channel_credentials, call_credentials, insecure, compression,
            wait_for_ready, timeout, metadata)

    @staticmethod
    def Destroy(request,
                target,
                options=(),
                channel_credentials=None,
                call_credentials=None,
                insecure=False,
                compression=None,
                wait_for_ready=None,
                timeout=None,
                metadata=None):
        """Send an Empty request to Destroy; deserializes a Reply."""
        return Inference._invoke(
            'Destroy', inference__pb2.Empty.SerializeToString,
            inference__pb2.Reply.FromString, request, target, options,
            channel_credentials, call_credentials, insecure, compression,
            wait_for_ready, timeout, metadata)