PyTorch version to screen and cleanup (#1325)

* Create flatten_recursive() helper function
* cleanup
* print torch version

branch pull/1315/head
parent 81d320109f
commit 19e2482458
models/yolo.py

@@ -1,11 +1,10 @@
 import argparse
 import logging
+import math
 import sys
 from copy import deepcopy
 from pathlib import Path

-import math
-
 sys.path.append('./')  # to run '$ python *.py' files in subdirectories
 logger = logging.getLogger(__name__)
@@ -74,7 +73,7 @@ class Model(nn.Module):

         # Define model
         if nc and nc != self.yaml['nc']:
-            print('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
+            logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
             self.yaml['nc'] = nc  # override yaml value
         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist, ch_out
         # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
@@ -93,7 +92,7 @@ class Model(nn.Module):
         # Init weights, biases
         initialize_weights(self)
         self.info()
-        print('')
+        logger.info('')

     def forward(self, x, augment=False, profile=False):
         if augment:
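Swapping print() for logger.info() in Model.__init__ routes these messages through Python's logging module, which emits nothing below WARNING until a handler is configured by the entry script. A minimal sketch of the setup these calls assume (illustrative only, not this repo's exact bootstrap):

import logging

# Route INFO-level records to the console as bare messages; without this
# (or an equivalent handler), the logger.info() lines above stay silent.
logging.basicConfig(format='%(message)s', level=logging.INFO)

logger = logging.getLogger(__name__)
logger.info('Overriding model.yaml nc=%g with nc=%g' % (80, 2))  # example values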
test.py (3 changed lines)

@@ -262,7 +262,8 @@ def test(data,
             print('ERROR: pycocotools unable to run: %s' % e)

     # Return results
-    print('Results saved to %s' % save_dir)
     if not training:
+        print('Results saved to %s' % save_dir)
         model.float()  # for training
     maps = np.zeros(nc) + map
     for i, c in enumerate(ap_class):
utils/datasets.py

@@ -946,3 +946,11 @@ def create_folder(path='./new'):
     if os.path.exists(path):
         shutil.rmtree(path)  # delete output folder
     os.makedirs(path)  # make new output folder
+
+
+def flatten_recursive(path='../coco128'):
+    # Flatten a recursive directory by bringing all files to top level
+    new_path = Path(path + '_flat')
+    create_folder(new_path)
+    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
+        shutil.copyfile(file, new_path / Path(file).name)
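A hedged usage sketch of the new helper (the utils.datasets import path is assumed from this repo's layout): it copies, rather than moves, every file at any depth under the source directory into a fresh '<path>_flat' sibling folder. Note that because only Path(file).name is kept, files sharing a basename in different subdirectories will overwrite one another.

from utils.datasets import flatten_recursive  # assumed import path

# Copy all files nested under ../coco128 (images, labels, ...) into ../coco128_flat/
flatten_recursive('../coco128')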
utils/torch_utils.py

@@ -1,9 +1,9 @@
 import logging
+import math
 import os
 import time
 from copy import deepcopy

-import math
 import torch
 import torch.backends.cudnn as cudnn
 import torch.nn as nn
@@ -39,14 +39,13 @@ def select_device(device='', batch_size=None):
         if ng > 1 and batch_size:  # check that batch_size is compatible with device_count
             assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
         x = [torch.cuda.get_device_properties(i) for i in range(ng)]
-        s = 'Using CUDA '
+        s = f'Using torch {torch.__version__} '
         for i in range(0, ng):
             if i == 1:
                 s = ' ' * len(s)
-            logger.info("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
-                        (s, i, x[i].name, x[i].total_memory / c))
+            logger.info("%sCUDA:%g (%s, %dMB)" % (s, i, x[i].name, x[i].total_memory / c))
     else:
-        logger.info('Using CPU')
+        logger.info(f'Using torch {torch.__version__} CPU')

     logger.info('')  # skip a line
     return torch.device('cuda:0' if cuda else 'cpu')
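The net effect is a shorter device banner that always includes the installed PyTorch version, e.g. "Using torch 1.7.0 CUDA:0 (Tesla T4, 15110MB)". A standalone sketch of the new format (c is assumed to be the bytes-to-MB divisor 1024 ** 2 defined earlier in this function):

import torch

c = 1024 ** 2  # bytes to MB (assumption; matches the total_memory / c usage above)
if torch.cuda.is_available():
    s = f'Using torch {torch.__version__} '
    for i in range(torch.cuda.device_count()):
        if i == 1:
            s = ' ' * len(s)  # align subsequent GPU lines under the first
        p = torch.cuda.get_device_properties(i)
        print('%sCUDA:%g (%s, %dMB)' % (s, i, p.name, p.total_memory / c))
else:
    print(f'Using torch {torch.__version__} CPU')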
@@ -143,7 +142,7 @@ def model_info(model, verbose=False):
         from thop import profile
         flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
         fs = ', %.1f GFLOPS' % (flops * 100)  # 640x640 FLOPS
-    except:
+    except ImportError:
         fs = ''

     logger.info(
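For context on the lines above: thop's profile() returns multiply-accumulate counts, so the * 2 converts MACs to FLOPs, / 1E9 gives GFLOPs, and * 100 extrapolates the 64x64 probe input to 640x640 (10x per side = 100x the pixels). Narrowing the bare except: to except ImportError: keeps the graceful fallback when thop is missing while no longer hiding unrelated errors. A self-contained sketch under those assumptions (gflops_string is a hypothetical name, not the repo's API):

from copy import deepcopy

import torch
import torch.nn as nn


def gflops_string(model):
    try:
        from thop import profile  # pip install thop
        macs = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0]
        return ', %.1f GFLOPS' % (macs / 1E9 * 2 * 100)  # MACs -> FLOPs, scaled to 640x640
    except ImportError:  # thop not installed: omit the FLOPS readout
        return ''


print('Conv2d(3, 16, 3)%s' % gflops_string(nn.Conv2d(3, 16, 3)))  # roughly ', 0.3 GFLOPS' with thop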