diff --git a/adv_patch_gen/conv_visdrone_2_yolo/conv_visdrone_2_yolo_fmt.py b/adv_patch_gen/conv_visdrone_2_yolo/conv_visdrone_2_yolo_fmt.py
index 64ec3db81ffd..1bb5672dc121 100644
--- a/adv_patch_gen/conv_visdrone_2_yolo/conv_visdrone_2_yolo_fmt.py
+++ b/adv_patch_gen/conv_visdrone_2_yolo/conv_visdrone_2_yolo_fmt.py
@@ -4,14 +4,15 @@
 YOLOv7 requires an additional txt file (Same name as the first parent directory)
 with paths to the images for the train, val & test splits
 """
+
+import argparse
+import glob
 import os
 import os.path as osp
-import glob
-import argparse
 from typing import Optional
 
-import tqdm
 import imagesize
+import tqdm
 
 # VisDrone annot fmt
 # <bbox_left>, <bbox_top>, <bbox_width>, <bbox_height>, <score>, <object_category>, <truncation>, <occlusion>
diff --git a/adv_patch_gen/conv_visdrone_2_yolo/disp_visdrone.py b/adv_patch_gen/conv_visdrone_2_yolo/disp_visdrone.py
index c01e41f7a68a..ddd6387a8a3f 100644
--- a/adv_patch_gen/conv_visdrone_2_yolo/disp_visdrone.py
+++ b/adv_patch_gen/conv_visdrone_2_yolo/disp_visdrone.py
@@ -4,8 +4,8 @@
 sys.path.append(".")
 
 import cv2
-from adv_patch_gen.conv_visdrone_2_yolo.utils import get_annot_img_paths, load_visdrone_annots_as_np
 
+from adv_patch_gen.conv_visdrone_2_yolo.utils import get_annot_img_paths, load_visdrone_annots_as_np
 
 # visdrone dataset classes
 # ignore(0), pedestrian(1), people(2), bicycle(3),
diff --git a/adv_patch_gen/conv_visdrone_2_yolo/disp_yolo.py b/adv_patch_gen/conv_visdrone_2_yolo/disp_yolo.py
index 035e89cc6d3d..bd5c0bbc8009 100644
--- a/adv_patch_gen/conv_visdrone_2_yolo/disp_yolo.py
+++ b/adv_patch_gen/conv_visdrone_2_yolo/disp_yolo.py
@@ -4,8 +4,8 @@
 sys.path.append(".")
 
 import cv2
-from adv_patch_gen.conv_visdrone_2_yolo.utils import get_annot_img_paths, load_yolo_annots_as_np
 
+from adv_patch_gen.conv_visdrone_2_yolo.utils import get_annot_img_paths, load_yolo_annots_as_np
 
 # visdrone dataset classes
 # ignore(0), pedestrian(1), people(2), bicycle(3),
diff --git a/adv_patch_gen/conv_visdrone_2_yolo/utils.py b/adv_patch_gen/conv_visdrone_2_yolo/utils.py
index bd08a1e48cf3..70ac84ea4b6a 100644
--- a/adv_patch_gen/conv_visdrone_2_yolo/utils.py
+++ b/adv_patch_gen/conv_visdrone_2_yolo/utils.py
@@ -1,6 +1,6 @@
-import os
 import glob
-from typing import List, Tuple, Set
+import os
+from typing import List, Set, Tuple
 
 import numpy as np
 
diff --git a/adv_patch_gen/utils/common.py b/adv_patch_gen/utils/common.py
index 97f02beb6947..8b92049c8e9f 100644
--- a/adv_patch_gen/utils/common.py
+++ b/adv_patch_gen/utils/common.py
@@ -1,11 +1,11 @@
 """Common utils."""
+
 import socket
 from typing import Tuple, Union
 
 import numpy as np
 from PIL import Image
 
-
 IMG_EXTNS = {".png", ".jpg", ".jpeg"}
 
 
diff --git a/adv_patch_gen/utils/config_parser.py b/adv_patch_gen/utils/config_parser.py
index 459d6b7d9fc5..d3299e717f34 100644
--- a/adv_patch_gen/utils/config_parser.py
+++ b/adv_patch_gen/utils/config_parser.py
@@ -1,6 +1,8 @@
 """Create argparse options for config files."""
-import json
+
 import argparse
+import json
+
 from easydict import EasyDict as edict
 
 
diff --git a/adv_patch_gen/utils/dataset.py b/adv_patch_gen/utils/dataset.py
index 123416c8c035..00239c84dff0 100644
--- a/adv_patch_gen/utils/dataset.py
+++ b/adv_patch_gen/utils/dataset.py
@@ -1,18 +1,18 @@
 """Dataset Class for loading YOLO format datasets where the source data dir has the image and labels subdirs
 where each image must have a corresponding label file with the same name.
""" + import glob import os.path as osp -from typing import Tuple, Optional +from typing import Optional, Tuple import numpy as np -from PIL import Image import torch import torch.nn.functional as F +from PIL import Image from torch.utils.data import Dataset from torchvision import transforms - IMG_EXTNS = {".png", ".jpg", ".jpeg"} diff --git a/adv_patch_gen/utils/loss.py b/adv_patch_gen/utils/loss.py index 52f9c452eb9e..27e6586b2a9d 100644 --- a/adv_patch_gen/utils/loss.py +++ b/adv_patch_gen/utils/loss.py @@ -1,4 +1,5 @@ """Loss functions used in patch generation.""" + from typing import Tuple import torch diff --git a/adv_patch_gen/utils/patch.py b/adv_patch_gen/utils/patch.py index d00ea64fcc42..f1e24d7e0422 100644 --- a/adv_patch_gen/utils/patch.py +++ b/adv_patch_gen/utils/patch.py @@ -1,6 +1,7 @@ """Modules for creating adversarial object patch.""" + import math -from typing import Union, Tuple +from typing import Tuple, Union import numpy as np import torch diff --git a/adv_patch_gen/utils/scheduler.py b/adv_patch_gen/utils/scheduler.py index 4750123427ab..1030d76c8625 100644 --- a/adv_patch_gen/utils/scheduler.py +++ b/adv_patch_gen/utils/scheduler.py @@ -3,8 +3,8 @@ # Gradual increase in learning rate by a constant amount to avoid sudden increase in lr import warnings -from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau +from torch.optim.lr_scheduler import ReduceLROnPlateau, _LRScheduler EPOCH_DEPRECATION_WARNING = ( "The epoch parameter in `scheduler.step()` was not necessary and is being " diff --git a/adv_patch_gen/utils/video.py b/adv_patch_gen/utils/video.py index 42da72101db5..7ce1eadb8b18 100644 --- a/adv_patch_gen/utils/video.py +++ b/adv_patch_gen/utils/video.py @@ -1,6 +1,6 @@ import os.path as osp +from subprocess import PIPE, Popen from typing import Optional -from subprocess import Popen, PIPE def ffmpeg_create_video_from_image_dir( diff --git a/detect_oak.py b/detect_oak.py index 8f5dbb3fc1e4..1a73580404c6 100644 --- a/detect_oak.py +++ b/detect_oak.py @@ -5,15 +5,16 @@ requirements: depthai-sdk==1.9.4 """ + import time -from typing import Tuple from threading import Thread +from typing import Tuple import cv2 -import torch -import numpy as np import depthai +import numpy as np import onnxruntime +import torch from models.common import DetectMultiBackend from utils.augmentations import letterbox diff --git a/explainer.py b/explainer.py index f21d31519bfc..091a4e6dc4f1 100644 --- a/explainer.py +++ b/explainer.py @@ -17,16 +17,16 @@ from pytorch_grad_cam import ( AblationCAM, EigenCAM, + EigenGradCAM, FullGrad, GradCAM, + GradCAMElementWise, GradCAMPlusPlus, HiResCAM, - ScoreCAM, - XGradCAM, - EigenGradCAM, - GradCAMElementWise, LayerCAM, RandomCAM, + ScoreCAM, + XGradCAM, ) from pytorch_grad_cam.utils.image import scale_cam_image, show_cam_on_image @@ -36,13 +36,11 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from utils.general import print_args, check_file, non_max_suppression -from utils.torch_utils import select_device - from models.common import DetectMultiBackend -from utils.dataloaders import LoadImages, IMG_FORMATS, VID_FORMATS -from utils.general import check_img_size, scale_boxes +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages +from utils.general import check_file, check_img_size, non_max_suppression, print_args, scale_boxes from utils.plots import Annotator, colors +from utils.torch_utils import select_device def yolo_reshape_transform(x): diff --git 
index 9ea2b936d740..8fe7ce8fdce1 100644
--- a/export.py
+++ b/export.py
@@ -742,9 +742,9 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline:
     model = ct.models.MLModel(pipeline.spec)
     model.input_description["image"] = "Input image"
     model.input_description["iouThreshold"] = f"(optional) IOU Threshold override (default: {nms.iouThreshold})"
-    model.input_description[
-        "confidenceThreshold"
-    ] = f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})"
+    model.input_description["confidenceThreshold"] = (
+        f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})"
+    )
     model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
     model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
     model.save(f)  # pipelined
diff --git a/models/experimental.py b/models/experimental.py
index ab229d50e30f..62ee802f6248 100644
--- a/models/experimental.py
+++ b/models/experimental.py
@@ -1,5 +1,6 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """Experimental modules."""
+
 import math
 
 import numpy as np
diff --git a/test_patch.py b/test_patch.py
index 61ebe56cbe66..acc7fba61c49 100644
--- a/test_patch.py
+++ b/test_patch.py
@@ -1,38 +1,38 @@
 """Testing code for evaluating Adversarial patches against object detection."""
+
+import glob
 import io
+import json
 import os
 import os.path as osp
-import time
-import json
-import glob
 import random
-from pathlib import Path
-from typing import Optional, List, Tuple
+import time
 from contextlib import redirect_stdout
+from pathlib import Path
+from typing import List, Optional, Tuple
 
-import tqdm
 import numpy as np
-from PIL import Image
-from easydict import EasyDict as edict
 import torch
-from torchvision import transforms
+import tqdm
+from easydict import EasyDict as edict
+from PIL import Image
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
+from torchvision import transforms
 
-from models.common import DetectMultiBackend
-from utils.metrics import ConfusionMatrix
-from utils.general import non_max_suppression, xyxy2xywh
-from utils.torch_utils import select_device
-from utils.plots import Annotator, colors
-
+from adv_patch_gen.utils.common import IMG_EXTNS, BColors, pad_to_square
 from adv_patch_gen.utils.config_parser import get_argparser, load_config_object
 from adv_patch_gen.utils.patch import PatchApplier, PatchTransformer
-from adv_patch_gen.utils.common import pad_to_square, BColors, IMG_EXTNS
 from adv_patch_gen.utils.video import (
-    ffmpeg_create_video_from_image_dir,
-    ffmpeg_combine_two_vids,
     ffmpeg_combine_three_vids,
+    ffmpeg_combine_two_vids,
+    ffmpeg_create_video_from_image_dir,
 )
+from models.common import DetectMultiBackend
+from utils.general import non_max_suppression, xyxy2xywh
+from utils.metrics import ConfusionMatrix
+from utils.plots import Annotator, colors
+from utils.torch_utils import select_device
 
 # optionally set seed for repeatability
 SEED = 42
diff --git a/train_patch.py b/train_patch.py
index b50e35b9dc6c..9d650e4e328e 100644
--- a/train_patch.py
+++ b/train_patch.py
@@ -3,38 +3,36 @@
 
 python train_patch.py --cfg config_json_file
 """
+
+import glob
+import json
 import os
 import os.path as osp
+import random
 import time
-import json
 from contextlib import nullcontext
-import glob
-import random
 
 import numpy as np
-from PIL import Image
-from tqdm import tqdm
-from easydict import EasyDict as edict
-
 import torch
 import torch.nn.functional as F
-from torch import optim, autograd
+from easydict import EasyDict as edict
+from PIL import Image
+from tensorboard import program
+from torch import autograd, optim
 from torch.cuda.amp import autocast
-from torchvision import transforms as T
-
 from torch.utils.tensorboard import SummaryWriter
-from tensorboard import program
-
-from models.common import DetectMultiBackend
-from utils.torch_utils import select_device
-from utils.general import non_max_suppression, xyxy2xywh
+from torchvision import transforms as T
+from tqdm import tqdm
 
-from test_patch import PatchTester
+from adv_patch_gen.utils.common import IMG_EXTNS, is_port_in_use, pad_to_square
 from adv_patch_gen.utils.config_parser import get_argparser, load_config_object
-from adv_patch_gen.utils.common import is_port_in_use, pad_to_square, IMG_EXTNS
 from adv_patch_gen.utils.dataset import YOLODataset
+from adv_patch_gen.utils.loss import MaxProbExtractor, NPSLoss, SaliencyLoss, TotalVariationLoss
 from adv_patch_gen.utils.patch import PatchApplier, PatchTransformer
-from adv_patch_gen.utils.loss import MaxProbExtractor, SaliencyLoss, TotalVariationLoss, NPSLoss
+from models.common import DetectMultiBackend
+from test_patch import PatchTester
+from utils.general import non_max_suppression, xyxy2xywh
+from utils.torch_utils import select_device
 
 # optionally set seed for repeatability
 SEED = None
diff --git a/utils/augmentations.py b/utils/augmentations.py
index 64f71db24400..dfac11f28776 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -1,9 +1,9 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """Image augmentation functions."""
 
-import os
 import glob
 import math
+import os
 import random
 
 import cv2
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index c930095f452d..3991563e7910 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -27,8 +27,8 @@
 from tqdm import tqdm
 
 from utils.augmentations import (
-    BboxPatcher,
     Albumentations,
+    BboxPatcher,
     augment_hsv,
     classify_albumentations,
     classify_transforms,
diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index c3fbded50a3c..2a2c5d734c2e 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -1,5 +1,6 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """Logging utils."""
+
 import json
 import os
 import warnings
diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
index 1bbea61effc2..6a6ed7636c88 100644
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -1,4 +1,5 @@
 """Main Logger class for ClearML experiment tracking."""
+
 import glob
 import re
 from pathlib import Path