tester.py
import time
import cv2
import torch
import numpy as np
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.transforms.functional as TF
from loguru import logger
from tqdm import tqdm
from trainer_dgt import Trainer
from utils.helpers import dir_exists, remove_files, double_threshold_iteration
from utils.metrics import AverageMeter, get_metrics, count_connect_component
import ttach as tta
from PIL import Image
class Tester(Trainer):
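    """Evaluate a trained segmentation model on the test set.

    Reuses the metric bookkeeping inherited from Trainer and supports optional
    test-time augmentation (CFG.tta), double-threshold iteration (CFG.DTI) and
    connected-component counting (CFG.CCC).
    """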
def __init__(self, model, loss, CFG, checkpoint, test_loader, dataset_path, show=False):
# super(Trainer, self).__init__()
self.loss = loss
self.CFG = CFG
self.test_loader = test_loader
self.model = nn.DataParallel(model.cuda())
self.dataset_path = dataset_path
self.show = show
self.model.load_state_dict(checkpoint['state_dict'])
if self.show:
dir_exists("save_picture")
remove_files("save_picture")
cudnn.benchmark = True
def test(self):
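        """Run inference over the test loader and log averaged metrics."""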
if self.CFG.tta:
self.model = tta.SegmentationTTAWrapper(
self.model, tta.aliases.d4_transform(), merge_mode='mean')
self.model.eval()
self._reset_metrics()
tbar = tqdm(self.test_loader, ncols=150)
tic = time.time()
with torch.no_grad():
for i, (img, gt, mask) in enumerate(tbar):
self.data_time.update(time.time() - tic)
img = img.cuda(non_blocking=True)
gt = gt.cuda(non_blocking=True)
pre = self.model(img)
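                # Debug/visualization block: skip the first 10 batches, save the
                # 11th sample's ground-truth and predicted masks to disk, then stop.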
if i<10:
continue
                # mask
                mask = mask[0].cpu().numpy()  # to numpy, take the first image in the batch
                # gt_mask
                gt_mask = gt[0].cpu().numpy()  # to numpy, take the first image in the batch
                gt_mask_pil = Image.fromarray(((gt_mask[0]*mask[0]) * 255).astype('uint8'))
                save_path1 = "/root/FR-UNet-master/dataset/gt_mask.png"  # save path
                gt_mask_pil.save(save_path1)
                # pre_mask
                pre_mask = nn.Sigmoid()(pre)[0].cpu().numpy()  # to numpy, take the first image in the batch
                pre_mask_pil1 = Image.fromarray(((pre_mask[0]*mask[0]) * 255).astype('uint8'))
                save_path2 = "/root/FR-UNet-master/dataset/pre_mask.png"  # save path
                pre_mask_pil1.save(save_path2)
                exit()
loss = self.loss(pre, gt)
self.total_loss.update(loss.item())
self.batch_time.update(time.time() - tic)
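                # Crop image, ground truth and prediction back to the dataset's
                # native resolution (CHUAC is left uncropped).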
if self.dataset_path.endswith("DRIVE"):
H, W = 584, 565
elif self.dataset_path.endswith("CHASEDB1"):
H, W = 960, 999
elif self.dataset_path.endswith("DCA1"):
H, W = 300, 300
if not self.dataset_path.endswith("CHUAC"):
img = TF.crop(img, 0, 0, H, W)
gt = TF.crop(gt, 0, 0, H, W)
pre = TF.crop(pre, 0, 0, H, W)
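                # Drop the batch and channel dimensions for saving and metric computation.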
img = img[0,0,...]
gt = gt[0,0,...]
pre = pre[0,0,...]
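                # Optionally save the input, ground truth, probability map and
                # binarized prediction as PNGs.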
if self.show:
predict = torch.sigmoid(pre).cpu().detach().numpy()
predict_b = np.where(predict >= self.CFG.threshold, 1, 0)
cv2.imwrite(
f"save_picture/img{i}.png", np.uint8(img.cpu().numpy()*255))
cv2.imwrite(
f"save_picture/gt{i}.png", np.uint8(gt.cpu().numpy()*255))
cv2.imwrite(
f"save_picture/pre{i}.png", np.uint8(predict*255))
cv2.imwrite(
f"save_picture/pre_b{i}.png", np.uint8(predict_b*255))
if self.CFG.DTI:
pre_DTI = double_threshold_iteration(
i, pre, self.CFG.threshold, self.CFG.threshold_low, True)
self._metrics_update(
*get_metrics(pre, gt, predict_b=pre_DTI).values())
if self.CFG.CCC:
self.CCC.update(count_connect_component(pre_DTI, gt))
else:
self._metrics_update(
*get_metrics(pre, gt, self.CFG.threshold).values())
if self.CFG.CCC:
self.CCC.update(count_connect_component(
pre, gt, threshold=self.CFG.threshold))
tbar.set_description(
'TEST ({}) | Loss: {:.4f} | AUC {:.4f} F1 {:.4f} Acc {:.4f} Sen {:.4f} Spe {:.4f} Pre {:.4f} IOU {:.4f} |B {:.2f} D {:.2f} |'.format(
i, self.total_loss.average, *self._metrics_ave().values(), self.batch_time.average, self.data_time.average))
tic = time.time()
logger.info(f"###### TEST EVALUATION ######")
logger.info(f'test time: {self.batch_time.average}')
logger.info(f' loss: {self.total_loss.average}')
if self.CFG.CCC:
logger.info(f' CCC: {self.CCC.average}')
for k, v in self._metrics_ave().items():
logger.info(f'{str(k):5s}: {v}')