commit 7a88058a2f (parent add3ea238c)
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,256,256]}]
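The train_benchmark_params block above lists pipe-separated alternatives (batch_size:104|128, fp_items:fp32|fp16) that the benchmark tooling expands into one run per combination. Below is a minimal Python sketch of that expansion, assuming a simple key:value|value line format; the function name parse_benchmark_params and the hard-coded input lines are illustrative, not the actual TIPC parser.

from itertools import product

def parse_benchmark_params(lines):
    # Split each "key:value|value" line into a key and its alternatives;
    # skip the "====...====" section separators.
    params = {}
    for line in lines:
        if line.startswith("=") or ":" not in line:
            continue
        key, _, value = line.partition(":")
        params[key] = value.split("|")
    return params

params = parse_benchmark_params([
    "batch_size:104|128",
    "fp_items:fp32|fp16",
    "epoch:1",
])

# Each (batch_size, precision) pair would correspond to one benchmark run.
for bs, fp in product(params["batch_size"], params["fp_items"]):
    print(f"batch_size={bs} precision={fp} epoch={params['epoch'][0]}")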
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,384,384]}]
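random_infer_input declares the dummy tensor fed to the model during inference benchmarking; this config uses a [3,384,384] CHW shape rather than the [3,256,256] of most other configs. A minimal sketch of what such a spec describes, assuming a batch dimension of 1 and using NumPy; the helper name is illustrative and this is not the benchmark harness itself.

import numpy as np

def make_random_infer_input(dtype, chw, batch_size=1):
    # Random [batch, C, H, W] tensor matching the declared dtype and shape.
    return np.random.rand(batch_size, *chw).astype(dtype)

x = make_random_infer_input("float32", [3, 384, 384])
print(x.shape, x.dtype)  # (1, 3, 384, 384) float32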
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,256,256]}]
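The flags entry lists PaddlePaddle FLAGS_* settings (tensor GC threshold, GPU memory fraction, cuDNN workspace limit). Such flags are typically applied through the environment before the framework initializes; a minimal sketch of that idea, assuming the semicolon-separated name=value layout shown above, not the TIPC launcher itself.

import os

flags_line = ("FLAGS_eager_delete_tensor_gb=0.0;"
              "FLAGS_fraction_of_gpu_memory_to_use=0.98;"
              "FLAGS_conv_workspace_size_limit=4096")

# Export each name=value pair so the framework can pick it up at startup.
for item in flags_line.split(";"):
    name, _, value = item.partition("=")
    os.environ[name] = value

print({k: v for k, v in os.environ.items() if k.startswith("FLAGS_")})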
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,256,256]}]
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,384,384]}]
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,256,256]}]
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,256,256]}]
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,256,256]}]
@@ -50,11 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
-o Global.benchmark:False
null:null
null:null
===========================train_benchmark_params==========================
batch_size:104|128
fp_items:fp32|fp16
epoch:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,256,256]}]