Comparing against the AP calculation in YOLOv5, it looks like YOLOX computes mAP50 by simply taking the mean of the precision values. However, isn't AP defined as the area under the precision-recall curve, i.e. AP = ∫₀¹ p(r) dr? As a result, the AP50 values printed by the two methods differ.
Here are the two code snippets for comparison:
yolox:
```python
import itertools

import numpy as np
from tabulate import tabulate

from yolox.data.datasets import COCO_CLASSES


def per_class_AP_table(coco_eval, class_names=COCO_CLASSES, headers=["class", "AP"], colums=6):
    per_class_AP = {}
    precisions = coco_eval.eval["precision"]
    # dimension of precisions: [TxRxKxAxM]
    # precision has dims (iou, recall, cls, area range, max dets)
    assert len(class_names) == precisions.shape[2]

    for idx, name in enumerate(class_names):
        # area range index 0: all area ranges
        # max dets index -1: typically 100 per image
        precision = precisions[:, :, idx, 0, -1]
        precision = precision[precision > -1]
        ap = np.mean(precision) if precision.size else float("nan")
        per_class_AP[name] = float(ap * 100)

    num_cols = min(colums, len(per_class_AP) * len(headers))
    result_pair = [x for pair in per_class_AP.items() for x in pair]
    row_pair = itertools.zip_longest(*[result_pair[i::num_cols] for i in range(num_cols)])
    table_headers = headers * (num_cols // len(headers))
    table = tabulate(
        row_pair, tablefmt="pipe", floatfmt=".3f", headers=table_headers, numalign="left",
    )
    return table
```
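For context: pycocotools evaluates the `precision` array on a fixed grid of 101 recall thresholds (`recThrs = linspace(0, 1, 101)`) and 10 IoU thresholds (`iouThrs = linspace(0.5, 0.95, 10)`), so taking the mean over the recall axis is the 101-point interpolated AP, which approximates the area under the PR curve. A minimal sketch (not code from either repo, and the PR curve below is made up purely for illustration):

```python
import numpy as np

# Toy PR curve sampled on the COCO recall grid (values are hypothetical).
rec_thrs = np.linspace(0.0, 1.0, 101)           # pycocotools recThrs
precision = np.clip(1.0 - rec_thrs ** 2, 0, 1)  # made-up precision-vs-recall values

ap_mean = precision.mean()               # what per_class_AP_table does per class
ap_area = np.trapz(precision, rec_thrs)  # explicit area under the same curve
print(f"mean over recall grid: {ap_mean:.4f}")
print(f"area under PR curve:   {ap_area:.4f}")  # the two values nearly coincide
```

Note also that `precisions[:, :, idx, 0, -1]` keeps all 10 IoU thresholds (the first axis), so the per-class numbers this table prints are AP@[0.5:0.95], not AP50; slicing index 0 of that axis (`precisions[0, :, idx, 0, -1]`) would give AP50 alone.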
yolov5:
```python
from pathlib import Path

import numpy as np

# compute_ap, smooth, plot_pr_curve and plot_mc_curve are defined alongside
# this function in YOLOv5's utils/metrics.py.


def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, sample_rate=1000):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp: True positives (nparray, nx1 or nx10).
        conf: Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
        plot: Plot precision-recall curve at mAP@0.5
        save_dir: Plot save directory
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes, nt = np.unique(target_cls, return_counts=True)
    nc = unique_classes.shape[0]  # number of classes, number of detections

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, sample_rate), []  # for plotting
    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, sample_rate)), np.zeros((nc, sample_rate))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = nt[ci]  # number of labels
        n_p = i.sum()  # number of predictions
        if n_p == 0 or n_l == 0:
            continue

        # Accumulate FPs and TPs
        fpc = (1 - tp[i]).cumsum(0)
        tpc = tp[i].cumsum(0)

        # Recall
        recall = tpc / (n_l + eps)  # recall curve
        r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

        # Precision
        precision = tpc / (tpc + fpc)  # precision curve
        p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score

        # AP from recall-precision curve
        for j in range(tp.shape[1]):
            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
            if plot and j == 0:
                py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + eps)
    names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data
    names = dict(enumerate(names))  # to dict
    if plot:
        plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
        plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
        plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
        plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')

    i = smooth(f1.mean(0), 0.1).argmax()  # max F1 index
    p, r, f1 = p[:, i], r[:, i], f1[:, i]
    tp = (r * nt).round()  # true positives
    fp = (tp / (p + eps) - tp).round()  # false positives
    return tp, fp, p, r, f1, ap, unique_classes.astype(int)
```
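For reference, the area-under-curve integration happens in the `compute_ap` helper that `ap_per_class` calls. A sketch of that helper as it appears in recent YOLOv5 versions (check `utils/metrics.py` in your checkout for the exact code):

```python
def compute_ap(recall, precision):
    """Compute AP given the recall and precision curves.
    # Returns
        Average precision, precision envelope, recall curve with sentinels
    """
    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([1.0], precision, [0.0]))

    # Compute the precision envelope (monotonically decreasing)
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where recall changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # sum of rectangles
    return ap, mpre, mrec
```

If this is the version in your checkout, YOLOv5 also ends up integrating precision over a 101-point recall grid, so any gap in the printed AP50 may come from other differences (matching/NMS details, or the YOLOX table averaging over all IoU thresholds) rather than from the integration step itself.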