From 37ccd3683d81018aabd9fd81087398aeb662050f Mon Sep 17 00:00:00 2001
From: Darren <Dongji0105@hotmail.com>
Date: Mon, 19 Apr 2021 09:23:15 +0700
Subject: [PATCH] Unify gen_wts.py and inference.cpp dummy test values (#457)

Reviewed by: @L1aoXingyu
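
For context (not part of the diff): inference.cpp feeds an all-255 dummy tensor
for its test forward pass, so the --verify output of gen_wts.py is only directly
comparable when the PyTorch side uses the same constant. Below is a minimal
sketch of that comparison, with a hypothetical placeholder model and helper
names (not fastreid API):

    import numpy as np
    import torch

    def dummy_input(h, w, device="cpu"):
        # Same constant as inference.cpp: every element set to 255.
        return torch.ones(1, 3, h, w, device=device) * 255.

    def compare_outputs(torch_out, trt_out, atol=1e-3):
        # Element-wise check that both backends agree on the dummy input.
        torch_out = np.asarray(torch_out, dtype=np.float32).ravel()
        trt_out = np.asarray(trt_out, dtype=np.float32).ravel()
        return np.allclose(torch_out, trt_out, atol=atol)

    if __name__ == "__main__":
        x = dummy_input(256, 128)               # 256x128 is a common SIZE_TEST
        placeholder_model = torch.nn.Flatten()  # stand-in for the exported model
        ref = placeholder_model(x).view(-1).detach().numpy()
        print(compare_outputs(ref, ref))        # True: same input, same backend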
---
 projects/FastRT/tools/gen_wts.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/projects/FastRT/tools/gen_wts.py b/projects/FastRT/tools/gen_wts.py
index 1fd7b86..dbcaf86 100644
--- a/projects/FastRT/tools/gen_wts.py
+++ b/projects/FastRT/tools/gen_wts.py
@@ -95,13 +95,13 @@ if __name__ == '__main__':
     model.eval()
     
     if args.verify:
-        input = torch.ones(1, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).to(cfg.MODEL.DEVICE)
+        input = torch.ones(1, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).to(cfg.MODEL.DEVICE) * 255.
         out = model(input).view(-1).cpu().detach().numpy()
         print('[Model output]: \n', out) 
         
     if args.benchmark:
         start_time = time.time()
-        input = torch.ones(1, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).to(cfg.MODEL.DEVICE)
+        input = torch.ones(1, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).to(cfg.MODEL.DEVICE) * 255.
         for i in range(100):
             out = model(input).view(-1).cpu().detach()
         print("--- %s seconds ---" % ((time.time() - start_time)/100.) )