Merge pull request #1860 from weisy11/update_tipc_configs

update tipc configs
Wei Shengyu 2022-04-21 14:36:32 +08:00 committed by GitHub
commit 0242a0b841
17 changed files with 34 additions and 6 deletions

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
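Note on the added lines: each config gains an infer_benchmark_params section whose random_infer_input entry names the dtype and CHW shape of the random tensor the inference benchmark should feed the model (224x224, 256x256, or 384x384 depending on the file). Below is a minimal sketch of how such a spec could be turned into test data; the parse_random_infer_input helper, the batch size of 1, and the use of NumPy are illustrative assumptions, not the TIPC harness itself.

# Hypothetical sketch: parse "random_infer_input:[{float32,[3,224,224]}]"
# into a list of random tensors for a benchmark run.
import re
import numpy as np

def parse_random_infer_input(spec, batch_size=1):
    body = spec.split(":", 1)[1]
    tensors = []
    for dtype, shape in re.findall(r"\{(\w+),\[([\d,]+)\]\}", body):
        dims = [batch_size] + [int(d) for d in shape.split(",")]
        tensors.append(np.random.rand(*dims).astype(dtype))
    return tensors

inputs = parse_random_infer_input("random_infer_input:[{float32,[3,224,224]}]")
print(inputs[0].shape, inputs[0].dtype)  # (1, 3, 224, 224) float32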

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,384,384]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,384,384]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -56,3 +56,5 @@ fp_items:fp32
 epoch:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
 flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]
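For context, the unchanged train_benchmark_params lines above this hunk's additions carry the training-side benchmark settings: precision (fp_items), epoch count, Paddle profiler options, and a flags entry listing FLAGS_* values that Paddle reads from the environment at startup. A hedged sketch of exporting that flags entry before launching a training command follows; the parsing helper and the tools/train.py command line are placeholders for illustration, not the actual TIPC runner.

# Hypothetical sketch: export the semicolon-separated "flags:" entry as
# FLAGS_* environment variables, then launch a training command with them.
import os
import subprocess

flags_line = "flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096"

env = dict(os.environ)
for item in flags_line.split(":", 1)[1].split(";"):
    key, value = item.split("=", 1)
    env[key] = value  # Paddle picks up FLAGS_* from the environment

# The script and config path below are placeholders, not the TIPC test command.
subprocess.run(["python", "tools/train.py", "-c", "config.yaml"], env=env, check=False)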

@@ -56,3 +56,5 @@ fp_items:fp32
 epoch:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
 flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,256,256]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,256,256]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,256,256]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -56,3 +56,5 @@ fp_items:fp32
 epoch:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
 flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -50,3 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]

@@ -50,9 +50,5 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.tran
 -o Global.benchmark:True
 null:null
 null:null
-===========================train_benchmark_params==========================
-batch_size:128
-fp_items:fp32
-epoch:1
---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,224,224]}]