detect.py: support for txt file lists

FL (focal loss) with configurable per-class loss weighting: alpha
test.py: t-SNE over the cropped bbox features
pull/2071/head
hanoch 2025-02-16 10:03:45 +02:00
parent 30c9907884
commit 6b8d33fa31
19 changed files with 68254 additions and 50 deletions


@@ -1,8 +1,8 @@
-lr0: 0.01 #0.001 # initial learning rate (SGD=1E-2, Adam=1E-3)
+lr0: 0.005 #0.001 # initial learning rate (SGD=1E-2, Adam=1E-3)
 lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
 momentum: 0.937 # SGD momentum/Adam beta1
 weight_decay: 0.005 # optimizer weight decay 5e-4 It resolve mAP of overfitting test
-warmup_epochs: 0.0 # warmup epochs (fractions ok)
+warmup_epochs: 3.0 # warmup epochs (fractions ok)
 warmup_momentum: 0.8 # warmup initial momentum
 warmup_bias_lr: 0.001 #0.001 # warmup initial bias lr
 loss_ota: 1 #1 # use ComputeLossOTA, use 0 for faster training
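
For reference, lrf is a multiplier rather than an absolute rate: the scheduler (cosine one_cycle by default, linear with --linear-lr) scales lr0 by a factor that ends at lrf, so training finishes at lr0 * lrf. A minimal sketch of the cosine variant, with the epoch count assumed:

import math

def one_cycle(y1=0.0, y2=1.0, steps=100):
    # cosine ramp from y1 to y2 over 'steps' epochs, as in YOLOv7's utils/general.py
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lr0, lrf, epochs = 0.005, 0.01, 100   # lr0/lrf from this hyp file; epoch count assumed
lf = one_cycle(1.0, lrf, epochs)      # per-epoch multiplier
print(lr0 * lf(0), lr0 * lf(epochs))  # 0.005 at the start -> 5e-05 (= lr0 * lrf) at the end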


@@ -0,0 +1,18 @@
+# COCO 2017 dataset http://cocodataset.org
+# download command/URL (optional)
+#Make symbolic link
+# sudo ln -s ~hanoch/projects/tir_frames_rois /mnt/Data/hanoch/tir_frames_rois
+path: /mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all #/home/hanoch/projects/tir_frames_rois/tir_car_44person_31 #/home/hanochk/tir_frames_rois/yolo7_tir_data
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: ./yolov7/tir_od/training_set_yolov7941_w_center_roi_no_swiss_no_38B_20b_22c_n_png.txt #training_set_yolov7941_w_center_roi_no_swiss_no_38B_20b_22c_w_png.txt
+val: ./yolov7/tir_od/tir_tiff_w_center_roi_validation_set.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
+#test: ./tir_od/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+# number of classes
+nc: 2
+# class names
+names: ['car', 'person']


@@ -0,0 +1,18 @@
+# COCO 2017 dataset http://cocodataset.org
+# download command/URL (optional)
+#Make symbolic link
+# sudo ln -s ~hanoch/projects/tir_frames_rois /mnt/Data/hanoch/tir_frames_rois
+path: /mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all #/home/hanoch/projects/tir_frames_rois/tir_car_44person_31 #/home/hanochk/tir_frames_rois/yolo7_tir_data
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: ./yolov7/tir_od/training_set_yolov7941_w_center_roi_no_swiss_no_38B_20b_22c_n_png_train_3_cls_jan_25.txt # training_set_yolov7941_w_center_roi_no_swiss_no_38B_20b_22c_n_png.txt #./yolov7/tir_od/training_set_yolov7941_w_center_roi.txt
+val: ./yolov7/tir_od/tir_tiff_w_center_roi_validation_set_train_cls_usa.txt #tir_tiff_w_center_roi_validation_set.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
+#test: ./tir_od/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+# number of classes
+nc: 3
+# class names
+names: ['car', 'person', 'train']


@@ -0,0 +1,18 @@
+# COCO 2017 dataset http://cocodataset.org
+# download command/URL (optional)
+path: /mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all #/home/hanoch/projects/tir_frames_rois/tir_tiff_tiff_files #/home/hanoch/projects/tir_frames_rois/tir_car_44person_31 #/home/hanochk/tir_frames_rois/yolo7_tir_data
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+#train: ./yolov7/tir_od/training_set.txt #./yolov7/tir_od/training_set.txt # ./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_training_set.txt #./yolov7/tir_od/training_set.txt #./yolov7/tir_od/training_set.txt # 118287 images
+#val: ./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
+test: ./yolov7/tir_od/fog_data_set.txt #Test51a_Test40A_test_set.txt #Test51a_Test40A_test_set_tiny.txt #Test51a_Test40A_test_set.txt # data at tir_tiff_tiff_files
+#test: ./yolov7/tir_od/test_set/tir_tiff_tiff_folder_test_set.txt # data at tir_tiff_tiff_files
+#test: ./tir_od/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+# number of classes
+nc: 2
+# class names
+names: ['car', 'person']


@@ -5,14 +5,14 @@
 # sudo ln -s ~hanoch/projects/tir_frames_rois /mnt/Data/hanoch/tir_frames_rois
 path: /mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all #/home/hanoch/projects/tir_frames_rois/tir_car_44person_31 #/home/hanochk/tir_frames_rois/yolo7_tir_data
 # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
-train: ./yolov7/tir_od/training_set_overfit.txt #./yolov7/tir_od/training_set_yolov7941_w_center_roi.txt
-val: ./yolov7/tir_od/training_set_overfit.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
+train: ./yolov7/tir_od/training_set_train_only_overfit.txt #training_set_overfit.txt #./yolov7/tir_od/training_set_yolov7941_w_center_roi.txt
+val: ./yolov7/tir_od/training_set_train_only_overfit.txt #training_set_overfit.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
 #test: ./tir_od/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
 # number of classes
-nc: 2
+nc: 3
 # class names
-names: ['car', 'person']
+names: ['car', 'person', 'train']


@@ -0,0 +1,18 @@
+# COCO 2017 dataset http://cocodataset.org
+# download command/URL (optional)
+#Make symbolic link
+# sudo ln -s ~hanoch/projects/tir_frames_rois /mnt/Data/hanoch/tir_frames_rois
+path: /mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all #/home/hanoch/projects/tir_frames_rois/tir_car_44person_31 #/home/hanochk/tir_frames_rois/yolo7_tir_data
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+train: ./yolov7/tir_od/training_set_overfit.txt #./yolov7/tir_od/training_set_yolov7941_w_center_roi.txt
+val: ./yolov7/tir_od/training_set_overfit.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
+#test: ./tir_od/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+# number of classes
+nc: 3
+# class names
+names: ['car', 'person', 'Train']


@@ -7,7 +7,7 @@ path: /mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all #/home/hanoch/projects
 #train: ./yolov7/tir_od/training_set.txt #./yolov7/tir_od/training_set.txt # ./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_training_set.txt #./yolov7/tir_od/training_set.txt #./yolov7/tir_od/training_set.txt # 118287 images
 #val: ./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
-test: ./yolov7/tir_od/test_set/Test51a_Test40A_test_set.txt # data at tir_tiff_tiff_files
+test: ./yolov7/tir_od/test_set/Test51a_Test40A_test_set.txt #Test51a_Test40A_test_set_tiny.txt #Test51a_Test40A_test_set.txt # data at tir_tiff_tiff_files
 #test: ./yolov7/tir_od/test_set/tir_tiff_tiff_folder_test_set.txt # data at tir_tiff_tiff_files
 #test: ./tir_od/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794


@@ -0,0 +1,18 @@
+# COCO 2017 dataset http://cocodataset.org
+# download command/URL (optional)
+path: /mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all #/home/hanoch/projects/tir_frames_rois/tir_tiff_tiff_files #/home/hanoch/projects/tir_frames_rois/tir_car_44person_31 #/home/hanochk/tir_frames_rois/yolo7_tir_data
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+#train: ./yolov7/tir_od/training_set.txt #./yolov7/tir_od/training_set.txt # ./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_training_set.txt #./yolov7/tir_od/training_set.txt #./yolov7/tir_od/training_set.txt # 118287 images
+#val: ./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/tir_tiff_car_person_min_size_44_31_validation_set.txt #./yolov7/tir_od/validation_set.txt #./yolov7/tir_od/val_tir_od.txt #./yolov7/tir_od/validation_set.txt # 5000 images
+test: ./yolov7/tir_od/test_set/Test51a_Test40A_test_set.txt #Test51a_Test40A_test_set_tiny.txt #Test51a_Test40A_test_set.txt # data at tir_tiff_tiff_files
+#test: ./yolov7/tir_od/test_set/tir_tiff_tiff_folder_test_set.txt # data at tir_tiff_tiff_files
+#test: ./tir_od/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+# number of classes
+nc: 3
+# class names
+names: ['car', 'person', 'train']


@@ -21,10 +21,10 @@ from utils.torch_utils import select_device, load_classifier, time_synchronized,
 def detect(save_img=False):
     source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
-    save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
-    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
+    save_img = not opt.nosave  # and not source.endswith('.txt') # save inference images
+    webcam = source.isnumeric() or source.lower().startswith(  # removed HK or source.endswith('.txt')
         ('rtsp://', 'rtmp://', 'http://', 'https://'))
     webcam = webcam and not source.endswith('.txt')

     # Directories
     save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
@@ -61,7 +61,8 @@ def detect(save_img=False):
         dataset = LoadImages(source, img_size=imgsz, stride=stride,
                              scaling_type=opt.norm_type, input_channels=opt.input_channels,
                              no_tir_signal=opt.no_tir_signal,
-                             tir_channel_expansion=opt.tir_channel_expansion)
+                             tir_channel_expansion=opt.tir_channel_expansion,
+                             rel_path_for_list_files=opt.rel_path_for_list_files)

     # Get names and colors
     names = model.module.names if hasattr(model, 'module') else model.names
@@ -218,6 +219,9 @@ if __name__ == '__main__':
     parser.add_argument('--save-path', default='', help='save to project/name')
+    parser.add_argument('--rel-path-for-list-files', default='/mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all', help='')
     opt = parser.parse_args()
@@ -229,7 +233,6 @@
     print(opt)
-    #check_requirements(exclude=('pycocotools', 'thop'))

     with torch.no_grad():
         if opt.update:  # update all models (to fix SourceChangeWarning)
             for opt.weights in ['yolov7.pt']:
@@ -237,6 +240,7 @@
                 strip_optimizer(opt.weights)
         else:
             detect()
+            # --source yolov7/tir_od/fog_data_set.txt

 """
 python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source inference/images/horses.jpg

test.py

@@ -17,7 +17,7 @@ from utils.metrics import ap_per_class, ConfusionMatrix, range_bar_plot, range_p
 from utils.plots import plot_images, output_to_target, plot_study_txt, append_to_txt
 from utils.torch_utils import select_device, time_synchronized, TracedModel
 import pandas as pd
+from yolo_object_embeddings import ObjectEmbeddingVisualizer

 def object_size_to_range(obj_height_pixels: float, focal: int, class_id: int = 1):
     class_height = {0: 1.5, 1: 1.8}  # car Sedan height = 1.5 m, person height is 1.8 m
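
The body of object_size_to_range is truncated in this hunk. A hypothetical completion, assuming the usual pinhole model (focal length in pixels) that the signature and class_height table suggest:

def object_size_to_range_sketch(obj_height_pixels: float, focal: int, class_id: int = 1) -> float:
    # pinhole model: an object of physical height H at range R, seen through a lens of
    # focal length f (in pixels), spans h = f * H / R pixels, hence R = f * H / h
    class_height = {0: 1.5, 1: 1.8}  # car sedan ~1.5 m, person ~1.8 m
    return focal * class_height[class_id] / obj_height_pixels

print(object_size_to_range_sketch(18.0, focal=1000, class_id=1))  # 18-px person -> 100.0 m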
@@ -104,7 +104,7 @@ def test(data,
         log_imgs = min(wandb_logger.log_imgs, 100)
     # Dataloader
+    embed_analyse = kwargs.get('embed_analyse', False)
     if not training:
         if device.type != 'cpu':
             model(torch.zeros(1, opt.input_channels, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
@@ -123,12 +123,21 @@
         dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, hyp, pad=0.5, augment=False, rect=False,  # rect was True # HK@@@ TODO : why pad=0.5?? only effective in rect=True at test time? https://github.com/ultralytics/ultralytics/issues/13271
                                        prefix=colorstr(f'{task}: '), rel_path_images=data['path'], num_cls=data['nc'])[0]

+    labels = np.concatenate(dataloader.dataset.labels, 0)
+    class_labels = torch.tensor(labels[:, 0])  # classes
     if v5_metric:
         print("Testing with YOLOv5 AP metric...")

     seen = 0
     confusion_matrix = ConfusionMatrix(nc=nc, conf=conf_thres, iou_thres=iou_thres)  # HK per conf per iou_thresh
     names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
+    if not training:
+        print(100 * '==')
+        print('Test set labels {} count : {}'.format(names, torch.bincount(class_labels.long(), minlength=nc) + 1))
     coco91class = coco80_to_coco91_class()
     s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
     p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
@@ -145,8 +154,12 @@
                                    'score'
                                    ])
-    stats_all_large, stats_person_medium = [], []
+    if embed_analyse:
+        obj_embed_viz = ObjectEmbeddingVisualizer(model=model, device=device)
+    features_acm = torch.empty((0, 1024))  # embedding dim of last scale 1024x20x20
+    labels_acm = np.array([])
+    stats_all_large, stats_person_medium = [], []

     if dataloader.dataset.use_csv_meta_data_file:
         n_bins_of100m = 20
@@ -209,8 +222,16 @@
             # out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=False)  # Does thresholding for class: list of detections, on (n,6) tensor per image [xyxy, conf, cls]
             t1 += time_synchronized() - t

+        if trace and embed_analyse and np.sum([x.numel() for x in out]) > 0:  # features are saved/cloned in the traced model version only; TODO for others
+            features, labels = obj_embed_viz.extract_object_grounded_features(feature_maps=model.features,
+                                                                              predictions=out,
+                                                                              image_shape=img.shape)
+            features_acm = torch.cat((features_acm, features.detach().cpu()), dim=0)
+            labels_acm = np.concatenate((labels_acm, labels), axis=0)
+
         # Statistics per image
         for si, pred in enumerate(out):  # [bbox_coors, objectness_logit, class]
             labels = targets[targets[:, 0] == si, 1:]
             nl = len(labels)
             tcls = labels[:, 0].tolist() if nl else []  # target class
@@ -422,6 +443,11 @@
     # Compute statistics
     stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
+    if trace and embed_analyse:
+        embeddings = obj_embed_viz.visualize_object_embeddings(features_acm,
+                                                               labels_acm,
+                                                               path=save_dir,
+                                                               tag=opt.conf_thres)

     if not training or 1:
         stats_person_medium = [np.concatenate(x, 0) for x in zip(*stats_person_medium)]  # to numpy
@@ -782,6 +808,9 @@
     parser.add_argument('--csv-metadata-path', default='', help='save to project/name')
+    parser.add_argument('--embed-analyse', action='store_true', help='')
     opt = parser.parse_args()

     if opt.tir_channel_expansion:  # operates over 3 channels
@@ -812,7 +841,8 @@
              save_conf=opt.save_conf,
              trace=not opt.no_trace,
              v5_metric=opt.v5_metric,
-             hyp=hyp)
+             hyp=hyp,
+             embed_analyse=opt.embed_analyse)

     elif opt.task == 'speed':  # speed benchmarks
         for w in opt.weights:
@@ -867,9 +897,12 @@ mAP:
 Fixed weather csv P/R
 --weights /mnt/Data/hanoch/runs/train/yolov7999/weights/best.pt --device 0 --batch-size 16 --data data/tir_od_test_set.yaml --img-size 640 --verbose --norm-type single_image_percentile_0_1 --input-channels 1 --project test --task test --csv-metadata-path tir_od/tir_tiff_seq_png_3_class_fixed_whether.xlsx --iou-thres 0.6 --conf 0.65
 FOG
---weights /mnt/Data/hanoch/runs/train/yolov7999/weights/best.pt --device 0 --batch-size 16 --data data/tir_od_fog_set.yaml --img-size 640 --verbose --norm-type single_image_percentile_0_1 --input-channels 1 --project test --task test --csv-metadata-path tir_od/tir_tiff_seq_png_3_class_fixed_whether.xlsx --conf 0.01 --iou-thres 0.6
+--weights /mnt/Data/hanoch/runs/train/yolov7999/weights/best.pt --device 0 --batch-size 16 --data data/tir_od_fog_set.yaml --img-size 640 --verbose --norm-type single_image_percentile_0_1 --input-channels 1 --project test --task test --csv-metadata-path tir_od/tir_tiff_seq_png_3_class_fixed_whether.xlsx --conf 0.65 --iou-thres 0.6
+Locomotive
+--weights /mnt/Data/hanoch/runs/train/yolov71107/weights/best.pt --device 0 --batch-size 16 --data data/tir_od_test_set_3_class_train.yaml --img-size 640 --verbose --norm-type single_image_percentile_0_1 --input-channels 1 --project test --task test --iou-thres 0.6 --conf 0.1 --embed-analyse
 ------- Error analysis ------------
 First run with conf_th=0.0001, then pick the desired threshold; re-run with that threshold and observe the images with bboxes at the chosen threshold
 """

File diff suppressed because it is too large.


@@ -1056,7 +1056,6 @@
 ./TIR5_V60_DEC21_Test51A_ML_RD_IL_2021_11_07_14_27_00_FS210_XGA_1951_2510_LIEL_left_roi_210_2479.tiff
 ./TIR5_V60_DEC21_Test51A_ML_RD_IL_2021_11_07_14_27_00_FS210_XGA_1951_2510_LIEL_right_roi_210_2479.tiff
 ./TIR5_V60_DEC21_Test51A_ML_RD_IL_2021_11_07_14_27_00_FS210_XGA_1951_2510_LIEL_left_roi_210_2480.tiff
-./TIR5_V60_DEC21_Test51A_ML_RD_IL_2021_11_07_14_27_00_FS210_XGA_1951_2510_LIEL_right_roi_210_2480.tiff
 ./TIR5_V60_DEC21_Test51A_ML_RD_IL_2021_11_07_14_27_00_FS210_XGA_1951_2510_LIEL_left_roi_210_2481.tiff
 ./TIR5_V60_DEC21_Test51A_ML_RD_IL_2021_11_07_14_27_00_FS210_XGA_1951_2510_LIEL_left_roi_210_2482.tiff
 ./TIR5_V60_DEC21_Test51A_ML_RD_IL_2021_11_07_14_27_00_FS210_XGA_1951_2510_LIEL_left_roi_210_2483.tiff
@@ -1190,23 +1189,14 @@
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8348.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8348.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8351.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8351.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8355.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8355.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8358.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8358.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8361.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8361.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8364.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8364.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8367.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8367.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8371.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8371.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8375.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8375.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8379.tiff
-./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_right_roi_210_8379.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8384.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8389.tiff
 ./TIR1_v60_JAN22_TEST51A_ML_RD_IL_2021_11_04_03_25_33_FS_210_XGA_8229_8982_Shaked_left_roi_210_8394.tiff

File diff suppressed because it is too large.


@@ -3,6 +3,7 @@ import logging
 import math
 import os
 import random
+import re
 import time
 from copy import deepcopy
 from pathlib import Path
@@ -20,7 +21,7 @@ from torch.cuda import amp
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.utils.tensorboard import SummaryWriter
 from tqdm import tqdm
-import re

 import test  # import test.py to get mAP after each epoch

 try:
@@ -40,25 +41,26 @@ from models.yolo import Model
 from utils.autoanchor import check_anchors
 from utils.datasets import create_dataloader
 from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
-    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
-    check_requirements, print_mutation, set_logging, one_cycle, colorstr
+    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_img_size, \
+    print_mutation, set_logging, one_cycle, colorstr
 from utils.google_utils import attempt_download
 from utils.loss import ComputeLoss, ComputeLossOTA
-from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
+from utils.plots import plot_images, plot_results, plot_evolution
 from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
 from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume

 logger = logging.getLogger(__name__)
 clear_ml = True
-from clearml import Task, Logger
+from clearml import Task
 if clear_ml:  # clearml support
     task = Task.init(
         project_name="TIR_OD",
-        task_name="train yolov7 with dummy test"  # with output_uri=True, models from torch.save are uploaded to the file server, or =/mnt/myfolder, or AWS, or Azure
-    )
+        task_name="train yolov7 'train' class"  # with output_uri=True, models from torch.save are uploaded to the file server, or =/mnt/myfolder, or AWS, or Azure
+        # output_uri='azure://company.blob.core.windows.net/folder'
+    )
     # Task.execute_remotely() invokes the job immediately on the remote rather than the dev machine
     task.set_base_docker(docker_image="nvcr.io/nvidia/pytorch:24.09-py3", docker_arguments="--shm-size 8G")
     # clear_ml can capture graphs like tensorboard
# clear_ml can capture graph like tensorboard
@@ -413,6 +415,7 @@ def train(hyp, opt, device, tb_writer=None):
     model.hyp = hyp  # attach hyperparameters to model
     model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
+    class_inverse_freq = labels_to_class_weights(dataset.labels, nc).to(device)
     model.names = names

     # Start training
# Start training
@@ -430,8 +433,14 @@ def train(hyp, opt, device, tb_writer=None):
     else:
         scaler = torch.amp.GradScaler("cuda", enabled=opt.amp) if is_torch_240 else torch.cuda.amp.GradScaler(enabled=opt.amp)

-    compute_loss_ota = ComputeLossOTA(model)  # init loss class
-    compute_loss = ComputeLoss(model)  # init loss class
+    loss_weight = torch.tensor([])
+    if opt.loss_weight:
+        loss_weight = class_inverse_freq
+    if 'loss_ota' not in hyp or hyp['loss_ota'] == 1:
+        compute_loss_ota = ComputeLossOTA(model, loss_weight=loss_weight)  # init loss class
+    else:
+        compute_loss = ComputeLoss(model, loss_weight=loss_weight)  # init loss class
     logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                 f'Using {dataloader.num_workers} dataloader workers\n'
                 f'Logging results to {save_dir}\n'
@@ -448,6 +457,8 @@ def train(hyp, opt, device, tb_writer=None):
     if 0:  # HK TODO remove later. Anomaly mode reports the source of a nan: if the nan error recurs, it adds a stack trace pointing at the forward function (enable anomaly mode before running the forward).
         torch.autograd.set_detect_anomaly(True)
+    print(100 * '==')
+    print('Training set labels {} count : {}'.format(names, torch.bincount(c.long(), minlength=nc) + 1))

     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
         model.train()
@@ -479,8 +490,10 @@ def train(hyp, opt, device, tb_writer=None):
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
+            # print(np.unique(targets, return_counts=True))
+            # print(np.bincount(targets[:,1].long(), minlength=nc))
             ni = i + nb * epoch  # number of integrated batches (since train start), i.e. iterations
             # imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0 @@HK TODO is that standardization?
             imgs = imgs.to(device, non_blocking=True).float()

             # Warmup
# Warmup
@@ -504,7 +517,7 @@ def train(hyp, opt, device, tb_writer=None):
             with amp.autocast(enabled=cuda):  # to decrease GPU VRAM, turn off OTA loss and see what happens HK TODO
                 # with amp.autocast(enabled=(cuda and opt.amp)):
-                pred = model(imgs)  # forward
+                pred = model(imgs)  # forward [B, C, W, H, [bbox[4], objectness[1], class-conf[nc]]]
                 if 'loss_ota' not in hyp or hyp['loss_ota'] == 1:
                     loss, loss_items = compute_loss_ota(pred, targets.to(device), imgs)  # loss scaled by batch_size
                 else:
@@ -761,6 +774,10 @@ if __name__ == '__main__':
     parser.add_argument('--csv-metadata-path', default='', help='save to project/name')
+    parser.add_argument('--loss-weight', action='store_true', help='weight the loss by 1/freq to compensate for imbalanced data')
+    parser.add_argument('--embed-analyse', action='store_true', help='')
     opt = parser.parse_args()

     # Only for clearML env
@@ -966,7 +983,8 @@ Overfit 640x640
 tir_od_overfit.yaml
 --workers 8 --device 0 --batch-size 32 --data data/tir_od_overfit.yaml --img-size 640 --weights /mnt/Data/hanoch/tir_frames_rois/yolov7.pt --cfg cfg/training/yolov7.yaml --name yolov7 --hyp hyp.tir_od_v7_overfit.yaml --adam --norm-type single_image_percentile_0_1 --input-channels 1 --linear-lr --epochs 100 --nosave --gamma-aug-prob 0.2 --cache-images
+# 3-class renewed yolov7999 list
+--workers 8 --device 0 --batch-size 24 --data data/tir_od_center_roi_aug_list_train_cls.yaml --img 640 640 --weights /mnt/Data/hanoch/tir_frames_rois/yolov7.pt --cfg cfg/training/yolov7.yaml --name yolov7 --hyp hyp.tir_od.tiny_aug_gamma_scaling_before_mosaic_rnd_scaling_no_ota.yaml --adam --norm-type single_image_percentile_0_1 --input-channels 1 --linear-lr --epochs 100 --gamma-aug-prob 0.1 --cache-images --image-weights
 #########################################################
 Extended model for higher resolution YOLO7E6
 # --workers 8 --device 0 --batch-size 8 --data data/tir_od_center_roi_aug_list_full_res.yaml --weights /mnt/Data/hanoch/tir_frames_rois/yolov7-e6.pt --img-size [768, 1024] --cfg cfg/deploy/yolov7-e6.yaml --name yolov7e --hyp hyp.tir_od.aug_gamma_scaling_before_mosaic_rnd_scaling_e6_full_res.yaml --adam --norm-type single_image_percentile_0_1 --input-channels 1 --linear-lr --epochs 2 --gamma-aug-prob 0.3 --cache-images --rect
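
A minimal sketch of the inverse-class-frequency weighting that --loss-weight switches on; it mirrors what utils.general.labels_to_class_weights computes (normalized 1/frequency), written here as standalone, assumed-equivalent code:

import numpy as np
import torch

def inverse_class_frequency(class_ids: np.ndarray, nc: int) -> torch.Tensor:
    counts = np.bincount(class_ids, minlength=nc).astype(np.float64)
    counts[counts == 0] = 1  # guard empty classes against division by zero
    w = 1.0 / counts
    return torch.from_numpy(w / w.sum())  # normalized inverse frequency

# e.g. an imbalanced set of 900 cars, 90 persons, 10 trains:
ids = np.r_[np.zeros(900, int), np.ones(90, int), np.full(10, 2)]
print(inverse_class_frequency(ids, nc=3))  # ~[0.0099, 0.0990, 0.8911]: 'train' dominates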


@@ -237,7 +237,8 @@ class _RepeatSampler(object):
 class LoadImages:  # for inference
     def __init__(self, path, img_size=640, stride=32,
                  scaling_type='standardization', img_percentile_removal=0.3, beta=0.3, input_channels=3,
-                 tir_channel_expansion=False, no_tir_signal=False):
+                 tir_channel_expansion=False, no_tir_signal=False,
+                 rel_path_for_list_files=''):

         p = str(Path(path).absolute())  # os-agnostic absolute path
         if '*' in p:
@@ -245,7 +246,11 @@ class LoadImages:  # for inference
         elif os.path.isdir(p):
             files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
         elif os.path.isfile(p):
-            files = [p]  # files
+            if path.endswith('.txt'):
+                files = self.parse_image_file_names(path, rel_path_for_list_files)
+            else:
+                files = [p]  # files
         else:
             raise Exception(f'ERROR: {p} does not exist')
@@ -273,6 +278,36 @@ class LoadImages:  # for inference
         self.tir_channel_expansion = tir_channel_expansion
         self.is_tir_signal = not (no_tir_signal)

+    def parse_image_file_names(self, path, rel_path_for_list_files):
+        try:
+            f = []  # image files
+            for p in path if isinstance(path, list) else [path]:
+                p = Path(p)  # os-agnostic
+                if p.is_dir():  # dir
+                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+                    # f = list(p.rglob('**/*.*'))  # pathlib
+                elif p.is_file():  # file
+                    with open(p, 'r') as t:
+                        t = t.read().strip().splitlines()
+                        parent = str(p.parent) + os.sep
+                        if bool(rel_path_for_list_files):
+                            f += [os.path.join(rel_path_for_list_files, x.replace('./', '')).rstrip() if x.startswith('./') else x
+                                  for x in t]  # local to global path
+                        else:
+                            f += [x.replace('./', parent).rstrip() if x.startswith('./') else x for x in t]  # local to global path
+                            # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
+                else:
+                    raise Exception(f'{p} does not exist')
+            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
+            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
+            assert self.img_files, f'No images found'
+        except Exception as e:
+            raise Exception(f'Error loading data from {path}: {e}\nSee {help_url}')
+        return f

     def __iter__(self):
         self.count = 0
@@ -677,7 +712,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
         if self.use_csv_meta_data_file:
             df = load_csv_xls_2_df(self.csv_meta_data_file)
             self.df_metadata = pd.DataFrame(columns=['sensor_type', 'part_in_day', 'weather_condition', 'country', 'train_state', 'tir_frame_image_file_name'])
-            # TODO: HK @@ iterate tqdm(zip(self.img_files, self.label_files)) and upon --force-csv-list remove missing entries in the csv from train/test lists!!!
+            # TODO: HK @@ iterate tqdm(zip(self.img_files, self.label_files)) and upon --force-csv-list remove missing entries from the csv in train/test lists!!!
             for ix, fname in enumerate(self.img_files):
                 file_name = fname.split('/')[-1]
                 if not (df['tir_frame_image_file_name'] == file_name).any():
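
A condensed, hypothetical sketch of the path-resolution rule parse_image_file_names applies to a .txt list: './'-prefixed entries are made absolute against --rel-path-for-list-files when it is given, otherwise against the list file's own directory (function name and the example filename are illustrative only):

import os
from pathlib import Path

def resolve_list_entries(list_file: str, rel_root: str = '') -> list:
    lines = Path(list_file).read_text().strip().splitlines()
    base = rel_root if rel_root else str(Path(list_file).parent)
    # './x.tiff' becomes '<base>/x.tiff'; other entries pass through untouched
    return [os.path.join(base, x[2:]) if x.startswith('./') else x for x in lines]

# './frame_roi_210_2479.tiff' with rel_root='/mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all'
# -> '/mnt/Data/hanoch/tir_frames_rois/yolo7_tir_data_all/frame_roi_210_2479.tiff'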


@@ -147,7 +147,17 @@ class FocalLoss(nn.Module):
         else:  # 'none'
             return loss

+"""
+https://github.com/ultralytics/ultralytics/issues/10406
+Define a list of alpha values:
+    alpha_list = [0.25, 0.5, 0.75]  # example alpha values for three classes
+Modify the focal loss calculation in your model's loss function:
+    alpha_factor = torch.tensor([alpha_list[i] for i in labels]).to(device)
+    alpha_factor = alpha_factor * labels + (1 - alpha_factor) * (1 - labels)
+    loss *= alpha_factor
+"""

 class QFocalLoss(nn.Module):
     # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
     def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
@@ -421,7 +431,7 @@ class APLoss(torch.autograd.Function):
 # Dual obj and cls losses and outputs inherited from Joseph Redmon's original YOLOv3
 class ComputeLoss:
     # Compute losses
-    def __init__(self, model, autobalance=False):
+    def __init__(self, model, autobalance=False, loss_weight=torch.tensor([])):
         super(ComputeLoss, self).__init__()
         device = next(model.parameters()).device  # get model device
         h = model.hyp  # hyperparameters
@@ -429,14 +439,17 @@ class ComputeLoss:
         # Define criteria
         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
         BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+        self.loss_weight = loss_weight

         # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
         self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

         # Focal loss
         g = h['fl_gamma']  # focal loss gamma
         if g > 0:
-            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+            alpha = 0.25  # default from the base code
+            if loss_weight.numel() > 0:
+                alpha = loss_weight  # override the paper's default
+            BCEcls, BCEobj = FocalLoss(BCEcls, g, alpha=alpha), FocalLoss(BCEobj, g)

         det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
         self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
@@ -555,11 +568,12 @@
 class ComputeLossOTA:
     # Compute losses
-    def __init__(self, model, autobalance=False):
+    def __init__(self, model, autobalance=False, loss_weight=torch.tensor([])):
         super(ComputeLossOTA, self).__init__()
         device = next(model.parameters()).device  # get model device
         h = model.hyp  # hyperparameters
+        if loss_weight.numel() > 0:
+            raise ValueError('Not implemented yet')

         # Define criteria
         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
         BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
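
To see how a per-class alpha tensor drops into the focal term: a self-contained sketch of the weighting math (not the repo's FocalLoss class itself) in which alpha of shape (nc,) broadcasts against the (n, nc) one-hot targets, so each class column gets its own balance factor:

import torch

def focal_bce(pred, true, alpha, gamma=1.5):
    # BCE-with-logits focal term with per-class alpha balancing
    bce = torch.nn.functional.binary_cross_entropy_with_logits(pred, true, reduction='none')
    p = torch.sigmoid(pred)
    p_t = true * p + (1 - true) * (1 - p)              # probability of the true label
    alpha_t = true * alpha + (1 - true) * (1 - alpha)  # (nc,) alpha broadcasts per class
    return (bce * alpha_t * (1.0 - p_t) ** gamma).mean()

pred = torch.randn(8, 3)  # logits for nc=3
true = torch.nn.functional.one_hot(torch.randint(0, 3, (8,)), 3).float()
print(focal_bce(pred, true, alpha=torch.tensor([0.01, 0.10, 0.89])))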


@@ -370,6 +370,18 @@ class TracedModel(nn.Module):
         print(" model is traced! \n")

     def forward(self, x, augment=False, profile=False):
-        out = self.model(x)
+        out = self.model(x)  # feat = torch.mean(out[-1], dim=[2, 3])
+        self.features = [x.clone().detach() for x in out]
         out = self.detect_layer(out)
-        return out
+        return out
+        # https://stackoverflow.com/questions/75319661/how-to-extract-and-visualize-feature-value-for-an-arbitrary-layer-during-inferen
+        """
+        def plot_ts_feature_maps(feature_maps, tag=''):
+            import matplotlib
+            feature_maps = feature_maps.to(torch.float32)
+            ts.show(feature_maps)
+            ts.save(feature_maps, '/home/hanoch/projects/tir_od/output/' + tag + '.jpg')
+        plot_ts_feature_maps(torch.mean(out[-1], dim=[2, 3]))
+        """
"""