remove unneeded functions
parent 4ab674b6cc
commit beb998fed3
export.py (220 changed lines)
@@ -117,22 +117,6 @@ def try_export(inner_func):
     return outer_func
 
 
-@try_export
-def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
-    # YOLOv5 TorchScript model export
-    LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-    f = file.with_suffix('.torchscript')
-
-    ts = torch.jit.trace(model, im, strict=False)
-    d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names}
-    extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
-    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
-        optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
-    else:
-        ts.save(str(f), _extra_files=extra_files)
-    return f, None
-
-
 @try_export
 def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
     # YOLOv5 ONNX export
@@ -211,44 +195,6 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
     return f, None
 
 
-@try_export
-def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
-    # YOLOv5 Paddle export
-    check_requirements(('paddlepaddle', 'x2paddle'))
-    import x2paddle
-    from x2paddle.convert import pytorch2paddle
-
-    LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
-    f = str(file).replace('.pt', f'_paddle_model{os.sep}')
-
-    pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im])  # export
-    yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata)  # add metadata.yaml
-    return f, None
-
-
-@try_export
-def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
-    # YOLOv5 CoreML export
-    check_requirements('coremltools')
-    import coremltools as ct
-
-    LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
-    f = file.with_suffix('.mlmodel')
-
-    ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
-    ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
-    bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
-    if bits < 32:
-        if MACOS:  # quantization only supported on macOS
-            with warnings.catch_warnings():
-                warnings.filterwarnings('ignore', category=DeprecationWarning)  # suppress numpy==1.20 float warning
-                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
-        else:
-            print(f'{prefix} quantization only supported on macOS, skipping...')
-    ct_model.save(f)
-    return f, ct_model
-
-
 @try_export
 def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
     # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
@@ -311,172 +257,6 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
     return f, None
 
 
-@try_export
-def export_saved_model(model,
-                       im,
-                       file,
-                       dynamic,
-                       tf_nms=False,
-                       agnostic_nms=False,
-                       topk_per_class=100,
-                       topk_all=100,
-                       iou_thres=0.45,
-                       conf_thres=0.25,
-                       keras=False,
-                       prefix=colorstr('TensorFlow SavedModel:')):
-    # YOLOv5 TensorFlow SavedModel export
-    try:
-        import tensorflow as tf
-    except Exception:
-        check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
-        import tensorflow as tf
-    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-
-    from models.tf import TFModel
-
-    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-    f = str(file).replace('.pt', '_saved_model')
-    batch_size, ch, *imgsz = list(im.shape)  # BCHW
-
-    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-    im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
-    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-    inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
-    outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-    keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
-    keras_model.trainable = False
-    keras_model.summary()
-    if keras:
-        keras_model.save(f, save_format='tf')
-    else:
-        spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
-        m = tf.function(lambda x: keras_model(x))  # full model
-        m = m.get_concrete_function(spec)
-        frozen_func = convert_variables_to_constants_v2(m)
-        tfm = tf.Module()
-        tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
-        tfm.__call__(im)
-        tf.saved_model.save(tfm,
-                            f,
-                            options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
-                                tf.__version__, '2.6') else tf.saved_model.SaveOptions())
-    return f, keras_model
-
-
-@try_export
-def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
-    # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
-    import tensorflow as tf
-    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-
-    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-    f = file.with_suffix('.pb')
-
-    m = tf.function(lambda x: keras_model(x))  # full model
-    m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
-    frozen_func = convert_variables_to_constants_v2(m)
-    frozen_func.graph.as_graph_def()
-    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
-    return f, None
-
-
-@try_export
-def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
-    # YOLOv5 TensorFlow Lite export
-    import tensorflow as tf
-
-    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-    batch_size, ch, *imgsz = list(im.shape)  # BCHW
-    f = str(file).replace('.pt', '-fp16.tflite')
-
-    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
-    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
-    converter.target_spec.supported_types = [tf.float16]
-    converter.optimizations = [tf.lite.Optimize.DEFAULT]
-    if int8:
-        from models.tf import representative_dataset_gen
-        dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
-        converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
-        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
-        converter.target_spec.supported_types = []
-        converter.inference_input_type = tf.uint8  # or tf.int8
-        converter.inference_output_type = tf.uint8  # or tf.int8
-        converter.experimental_new_quantizer = True
-        f = str(file).replace('.pt', '-int8.tflite')
-    if nms or agnostic_nms:
-        converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
-
-    tflite_model = converter.convert()
-    open(f, 'wb').write(tflite_model)
-    return f, None
-
-
-@try_export
-def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
-    # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
-    cmd = 'edgetpu_compiler --version'
-    help_url = 'https://coral.ai/docs/edgetpu/compiler/'
-    assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
-    if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0:
-        LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
-        sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
-        for c in (
-                'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
-                'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
-                'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
-            subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
-    ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
-
-    LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
-    f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model
-    f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model
-
-    subprocess.run([
-        'edgetpu_compiler',
-        '-s',
-        '-d',
-        '-k',
-        '10',
-        '--out_dir',
-        str(file.parent),
-        f_tfl,], check=True)
-    return f, None
-
-
-@try_export
-def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
-    # YOLOv5 TensorFlow.js export
-    check_requirements('tensorflowjs')
-    import tensorflowjs as tfjs
-
-    LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
-    f = str(file).replace('.pt', '_web_model')  # js dir
-    f_pb = file.with_suffix('.pb')  # *.pb path
-    f_json = f'{f}/model.json'  # *.json path
-
-    args = [
-        'tensorflowjs_converter',
-        '--input_format=tf_frozen_model',
-        '--quantize_uint8' if int8 else '',
-        '--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
-        str(f_pb),
-        str(f),]
-    subprocess.run([arg for arg in args if arg], check=True)
-
-    json = Path(f_json).read_text()
-    with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
-        subst = re.sub(
-            r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
-            r'"Identity.?.?": {"name": "Identity.?.?"}, '
-            r'"Identity.?.?": {"name": "Identity.?.?"}, '
-            r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
-            r'"Identity_1": {"name": "Identity_1"}, '
-            r'"Identity_2": {"name": "Identity_2"}, '
-            r'"Identity_3": {"name": "Identity_3"}}}', json)
-        j.write(subst)
-    return f, None
-
-
 def add_tflite_metadata(file, metadata, num_outputs):
     # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
     with contextlib.suppress(ImportError):