From c34e5fba304986a559c229bf85e55f40fabbc21b Mon Sep 17 00:00:00 2001 From: UltralyticsAssistant Date: Mon, 26 Feb 2024 19:11:08 +0000 Subject: [PATCH] Auto-format by https://ultralytics.com/actions --- adv_patch_gen/README.md | 2 +- adv_patch_gen/configs/README.md | 89 ++++++++++---------- adv_patch_gen/conv_visdrone_2_yolo/README.md | 20 +++-- 3 files changed, 57 insertions(+), 54 deletions(-) diff --git a/adv_patch_gen/README.md b/adv_patch_gen/README.md index d03cbfa2cba0..c0a43403fa3d 100644 --- a/adv_patch_gen/README.md +++ b/adv_patch_gen/README.md @@ -33,4 +33,4 @@ pip install Cython sudo apt-get install python3.8-dev # or any desired python version pip install pycocotools # https://github.com/ppwwyyxx/cocoapi # pip install git+https://github.com/philferriere/cocoapi.git#subdirectory=PythonAPI # depreciated -``` \ No newline at end of file +``` diff --git a/adv_patch_gen/configs/README.md b/adv_patch_gen/configs/README.md index 3503be5e311d..638f09bfc100 100644 --- a/adv_patch_gen/configs/README.md +++ b/adv_patch_gen/configs/README.md @@ -1,46 +1,47 @@ # Adversarial Patch Training Config Reference - - "image_dir": "data/train/images", - "label_dir": "data/train/labels", - "val_image_dir": "data/val/images", # epoch freq for running validation run. 1 means validate after every epoch. 0 or null means no val - "use_even_odd_images": "all", # (str), ('all', 'even', 'odd'): use images with even/odd numbers in the last char of their filenames - "log_dir": "runs/train_adversarial", - "tensorboard_port": 8994, - "tensorboard_batch_log_interval": 15, - "weights_file": "runs/weights/best.pt", - "triplet_printfile": "triplets.csv", - "device": "cuda:0", # (str): 'cpu' or 'cuda' or 'cuda:0,1,2,3' - "use_amp": true, - "patch_name": "base", - "val_epoch_freq": 100, - "patch_save_epoch_freq": 1, # int freq for saving patches. 
1 means save after every epoch - "model_in_sz": [640, 640], # (int, int): model input height, width - "patch_src": "gray", # str: gray random, or path_to_init_patch - "patch_img_mode": "RGB", # str: patch channel image mode. Currently RGB * L supported - "patch_size": [64, 64], # (int, int): must be (height, width) - "objective_class_id": null, # int: class id to target for adv attack. Use null for general attack for all classes - "min_pixel_area": null, # int: min pixel area to use for training. Pixel area chosen after resizing to model in size - "target_size_frac": 0.3, # float: patch proportion size compared to bbox size. Range also accepted i.e. [0.25, 0.4] - "use_mul_add_gau": true, # bool: use mul & add gaussian noise or not to patches - "mul_gau_mean": 0.5, # float: mul gaussian noise mean (reduces contrast) mean. Range also accepted i.e. [0.25, 0.4] - "mul_gau_std": 0.1, # float: mul gaussian noise std (Adds rand noise) - "random_patch_loc": true, # bool: place/translate patches randomly on bbox - "x_off_loc": [-0.25, 0.25], # [float, float]: left, right x-axis disp from bbox center - "y_off_loc": [-0.25, 0.25], # [float, float]: top, bottom y-axis disp from bbox center - "rotate_patches": true, # bool: rotate patches or not - "transform_patches": true, # bool: add bightness, contrast and noise transforms to patches or not - "patch_pixel_range": [0, 255], # [int, int]: patch pixel range, range is [0, 255], numbers div by 255 in patches - "patch_alpha": 1, # float: patch opacity, recommended to set to 1 - "class_list": ["class1", "class2"], - "n_classes": 2, - "n_epochs": 300, - "max_labels": 48, - "start_lr": 0.03, - "min_tv_loss": 0.1, - "sal_mult": 1.0, - "tv_mult": 2.5, - "nps_mult": 0.01, # float: Use 0.01 when not using sal. 
With sal use 0.001 - "batch_size": 8, - "debug_mode": false, # bool: if yes, images with adv drawn saved during each batch - "loss_target": "obj * cls" # str: 'obj', 'cls', 'obj * cls' +``` + "image_dir": "data/train/images", + "label_dir": "data/train/labels", + "val_image_dir": "data/val/images", # epoch freq for running validation run. 1 means validate after every epoch. 0 or null means no val + "use_even_odd_images": "all", # (str), ('all', 'even', 'odd'): use images with even/odd numbers in the last char of their filenames + "log_dir": "runs/train_adversarial", + "tensorboard_port": 8994, + "tensorboard_batch_log_interval": 15, + "weights_file": "runs/weights/best.pt", + "triplet_printfile": "triplets.csv", + "device": "cuda:0", # (str): 'cpu' or 'cuda' or 'cuda:0,1,2,3' + "use_amp": true, + "patch_name": "base", + "val_epoch_freq": 100, + "patch_save_epoch_freq": 1, # int freq for saving patches. 1 means save after every epoch + "model_in_sz": [640, 640], # (int, int): model input height, width + "patch_src": "gray", # str: gray random, or path_to_init_patch + "patch_img_mode": "RGB", # str: patch channel image mode. Currently RGB * L supported + "patch_size": [64, 64], # (int, int): must be (height, width) + "objective_class_id": null, # int: class id to target for adv attack. Use null for general attack for all classes + "min_pixel_area": null, # int: min pixel area to use for training. Pixel area chosen after resizing to model in size + "target_size_frac": 0.3, # float: patch proportion size compared to bbox size. Range also accepted i.e. [0.25, 0.4] + "use_mul_add_gau": true, # bool: use mul & add gaussian noise or not to patches + "mul_gau_mean": 0.5, # float: mul gaussian noise mean (reduces contrast) mean. Range also accepted i.e. 
[0.25, 0.4] + "mul_gau_std": 0.1, # float: mul gaussian noise std (Adds rand noise) + "random_patch_loc": true, # bool: place/translate patches randomly on bbox + "x_off_loc": [-0.25, 0.25], # [float, float]: left, right x-axis disp from bbox center + "y_off_loc": [-0.25, 0.25], # [float, float]: top, bottom y-axis disp from bbox center + "rotate_patches": true, # bool: rotate patches or not + "transform_patches": true, # bool: add brightness, contrast and noise transforms to patches or not + "patch_pixel_range": [0, 255], # [int, int]: patch pixel range, range is [0, 255], numbers div by 255 in patches + "patch_alpha": 1, # float: patch opacity, recommended to set to 1 + "class_list": ["class1", "class2"], + "n_classes": 2, + "n_epochs": 300, + "max_labels": 48, + "start_lr": 0.03, + "min_tv_loss": 0.1, + "sal_mult": 1.0, + "tv_mult": 2.5, + "nps_mult": 0.01, # float: Use 0.01 when not using sal. With sal use 0.001 + "batch_size": 8, + "debug_mode": false, # bool: if yes, images with adv drawn saved during each batch + "loss_target": "obj * cls" # str: 'obj', 'cls', 'obj * cls' +``` diff --git a/adv_patch_gen/conv_visdrone_2_yolo/README.md b/adv_patch_gen/conv_visdrone_2_yolo/README.md index 07c52c1d0409..345cbe546194 100644 --- a/adv_patch_gen/conv_visdrone_2_yolo/README.md +++ b/adv_patch_gen/conv_visdrone_2_yolo/README.md @@ -20,14 +20,16 @@ Annotations for the detections are follows: `, , , , , , , ` - - The x coordinate of the top-left corner of the predicted bounding box - - The y coordinate of the top-left corner of the predicted object bounding box - - The width in pixels of the predicted object bounding box - - The height in pixels of the predicted object bounding box - - The score in the DETECTION file indicates the confidence of the predicted bounding box enclosing an object instance. The score in GROUNDTRUTH file is set to 1 or 0. 1 indicates the bounding box is considered in evaluation, while 0 indicates the bounding box will be ignored. 
- - The object category indicates the type of annotated object, (i.e., ignored regions(0), pedestrian(1), people(2), bicycle(3), car(4), van(5), truck(6), tricycle(7), awning-tricycle(8), bus(9), motor(10), others(11)) - - The score in the DETECTION result file should be set to the constant -1.The score in the GROUNDTRUTH file indicates the degree of object parts appears outside a frame (i.e., no truncation = 0 (truncation ratio 0%), and partial truncation = 1 (truncation ratio 1% ~ 50%)). - - The score in the DETECTION file should be set to the constant -1. The score in the GROUNDTRUTH file indicates the fraction of objects being occluded (i.e., no occlusion = 0 (occlusion ratio 0%), partial occlusion = 1 (occlusion ratio 1% ~ 50%), and heavy occlusion = 2 (occlusion ratio 50% ~ 100%)). +``` + - The x coordinate of the top-left corner of the predicted bounding box + - The y coordinate of the top-left corner of the predicted object bounding box + - The width in pixels of the predicted object bounding box + - The height in pixels of the predicted object bounding box + - The score in the DETECTION file indicates the confidence of the predicted bounding box enclosing an object instance. The score in GROUNDTRUTH file is set to 1 or 0. 1 indicates the bounding box is considered in evaluation, while 0 indicates the bounding box will be ignored. + - The object category indicates the type of annotated object, (i.e., ignored regions(0), pedestrian(1), people(2), bicycle(3), car(4), van(5), truck(6), tricycle(7), awning-tricycle(8), bus(9), motor(10), others(11)) + - The score in the DETECTION result file should be set to the constant -1. The score in the GROUNDTRUTH file indicates the degree of object parts appears outside a frame (i.e., no truncation = 0 (truncation ratio 0%), and partial truncation = 1 (truncation ratio 1% ~ 50%)). + - The score in the DETECTION file should be set to the constant -1. 
The score in the GROUNDTRUTH file indicates the fraction of objects being occluded (i.e., no occlusion = 0 (occlusion ratio 0%), partial occlusion = 1 (occlusion ratio 1% ~ 50%), and heavy occlusion = 2 (occlusion ratio 50% ~ 100%)). +``` ## Download VisDrone Dataset @@ -81,4 +83,4 @@ Note: To convert to COCO annotation format refer to