'''
Capsules for Object Segmentation (SegCaps)
Original Paper by Rodney LaLonde and Ulas Bagci (https://arxiv.org/abs/1804.04241)
Code written by: Rodney LaLonde
If you use significant portions of this code or the ideas from our paper, please cite it :)
If you have any questions, please email me at [email protected].
This is the main file for the project. From here you can train, test, and manipulate the SegCaps models.
Please see the README for detailed instructions for this project; an example invocation is sketched in the
comment below.
'''
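# A hypothetical example invocation (the data path below is a placeholder, not a real
# directory from this project); it trains and tests the default SegCaps-R3 network and
# skips the manipulation step:
#
#   python main.py --data_root_dir /path/to/data --net segcapsr3 --train 1 --test 1 --manip 0
#
# The data root is expected to contain an 'imgs' subdirectory (see the README for the
# full expected layout); all other options fall back to the defaults defined below.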
from __future__ import print_function

from os import environ, makedirs
from os.path import join
import argparse
from time import gmtime, strftime

import SimpleITK as sitk
from keras.utils import print_summary

from load_3D_data import load_data, split_data
from model_helper import create_model

# Timestamp used to tag the output files of this run.
time = strftime("%Y-%m-%d-%H:%M:%S", gmtime())


def main(args):
    # Ensure training, testing, and manip are not all turned off.
    assert (args.train or args.test or args.manip), \
        'Cannot have train, test, and manip all set to 0; nothing to do.'

    # Load the training, validation, and testing data.
    try:
        train_list, val_list, test_list = load_data(args.data_root_dir, args.split_num)
    except Exception:
        # Create the training and testing splits if they are not found, then reload them.
        split_data(args.data_root_dir, num_splits=4)
        train_list, val_list, test_list = load_data(args.data_root_dir, args.split_num)

    # Get image properties from the first image and assume all images share them.
    img_shape = sitk.GetArrayFromImage(sitk.ReadImage(join(args.data_root_dir, 'imgs', train_list[0][0]))).shape
    net_input_shape = (img_shape[1], img_shape[2], args.slices)

    # Create the model for training/testing/manipulation.
    model_list = create_model(args=args, input_shape=net_input_shape)
    print_summary(model=model_list[0], positions=[.38, .65, .75, 1.])
    args.output_name = 'split-' + str(args.split_num) + '_batch-' + str(args.batch_size) + \
                       '_shuff-' + str(args.shuffle_data) + '_aug-' + str(args.aug_data) + \
                       '_loss-' + str(args.loss) + '_slic-' + str(args.slices) + \
                       '_sub-' + str(args.subsamp) + '_strid-' + str(args.stride) + \
                       '_lr-' + str(args.initial_lr) + '_recon-' + str(args.recon_wei)
    args.time = time

    # Output directories for model checkpoints, logs, TensorBoard logs, and plots.
    args.check_dir = join(args.data_root_dir, 'saved_models', args.net)
    args.log_dir = join(args.data_root_dir, 'logs', args.net)
    args.tf_log_dir = join(args.log_dir, 'tf_logs')
    args.output_dir = join(args.data_root_dir, 'plots', args.net)
    for out_dir in (args.check_dir, args.log_dir, args.tf_log_dir, args.output_dir):
        try:
            makedirs(out_dir)
        except OSError:
            pass  # Directory already exists.

    if args.train:
        from train import train
        # Run training.
        train(args, train_list, val_list, model_list[0], net_input_shape)

    if args.test:
        from test import test
        # Run testing.
        test(args, test_list, model_list, net_input_shape)

    if args.manip:
        from manip import manip
        # Run manipulation of the SegCaps vectors.
        manip(args, test_list, model_list, net_input_shape)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train on Medical Data')
    parser.add_argument('--data_root_dir', type=str, required=True,
                        help='The root directory for your data.')
    parser.add_argument('--weights_path', type=str, default='',
                        help='/path/to/trained_model.hdf5 from root. Set to "" for none.')
    parser.add_argument('--split_num', type=int, default=0,
                        help='Which training split to train/test on.')
    parser.add_argument('--net', type=str.lower, default='segcapsr3',
                        choices=['segcapsr3', 'segcapsr1', 'segcapsbasic', 'unet', 'tiramisu'],
                        help='Choose your network.')
    parser.add_argument('--train', type=int, default=1, choices=[0, 1],
                        help='Set to 1 to enable training.')
    parser.add_argument('--test', type=int, default=1, choices=[0, 1],
                        help='Set to 1 to enable testing.')
    parser.add_argument('--manip', type=int, default=1, choices=[0, 1],
                        help='Set to 1 to enable manipulation.')
    parser.add_argument('--shuffle_data', type=int, default=1, choices=[0, 1],
                        help='Whether or not to shuffle the training data (both per epoch and in slice order).')
    parser.add_argument('--aug_data', type=int, default=1, choices=[0, 1],
                        help='Whether or not to use data augmentation during training.')
    parser.add_argument('--loss', type=str.lower, default='w_bce', choices=['bce', 'w_bce', 'dice', 'mar', 'w_mar'],
                        help='Which loss to use. "bce" and "w_bce": unweighted and weighted binary cross entropy, '
                             '"dice": soft dice coefficient, "mar" and "w_mar": unweighted and weighted margin loss.')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='Batch size for training/testing.')
    parser.add_argument('--initial_lr', type=float, default=0.0001,
                        help='Initial learning rate for Adam.')
    parser.add_argument('--recon_wei', type=float, default=131.072,
                        help='If using a capsule network: the weighting coefficient for the decoder '
                             'reconstruction loss.')
    parser.add_argument('--slices', type=int, default=1,
                        help='Number of slices to include for training/testing.')
    parser.add_argument('--subsamp', type=int, default=-1,
                        help='Number of slices to skip when forming 3D samples for training. Enter -1 for random '
                             'subsampling up to 5%% of total slices.')
    parser.add_argument('--stride', type=int, default=1,
                        help='Number of slices to move when generating the next sample.')
    parser.add_argument('--verbose', type=int, default=1, choices=[0, 1, 2],
                        help='Set the verbosity for training. 0: silent, 1: per iteration, 2: per epoch.')
    parser.add_argument('--save_raw', type=int, default=1, choices=[0, 1],
                        help='Enter 0 to not save the raw output, 1 to save it.')
    parser.add_argument('--save_seg', type=int, default=1, choices=[0, 1],
                        help='Enter 0 to not save the segmentation output, 1 to save it.')
    parser.add_argument('--save_prefix', type=str, default='',
                        help='Prefix to add to the saved CSV.')
    parser.add_argument('--thresh_level', type=float, default=0.,
                        help='Enter 0.0 for Otsu thresholding, otherwise set the threshold value.')
    parser.add_argument('--compute_dice', type=int, default=1,
                        help='Set to 1 to compute the Dice coefficient, 0 to skip.')
    parser.add_argument('--compute_jaccard', type=int, default=1,
                        help='Set to 1 to compute the Jaccard index, 0 to skip.')
    parser.add_argument('--compute_assd', type=int, default=0,
                        help='Set to 1 to compute the average symmetric surface distance, 0 to skip.')
    parser.add_argument('--which_gpus', type=str, default="0",
                        help='Enter "-2" for CPU only, "-1" for all GPUs available, '
                             'or a comma-separated list of GPU id numbers, e.g. "0,1,4".')
    parser.add_argument('--gpus', type=int, default=-1,
                        help='Number of GPUs you have available for training. '
                             'If entering specific GPU ids under the --which_gpus arg or if using CPU, '
                             'then this number will be inferred; otherwise this argument must be included.')
    arguments = parser.parse_args()

    # Configure which GPUs (if any) are visible to TensorFlow/Keras.
    if arguments.which_gpus == '-2':
        # CPU only.
        environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        environ["CUDA_VISIBLE_DEVICES"] = ""
    elif arguments.which_gpus == '-1':
        assert arguments.gpus != -1, \
            'Use-all-GPUs option selected under --which_gpus; with this option the user MUST ' \
            'specify the number of GPUs available with the --gpus option.'
    else:
        arguments.gpus = len(arguments.which_gpus.split(','))
        environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        environ["CUDA_VISIBLE_DEVICES"] = str(arguments.which_gpus)

    if arguments.gpus > 1:
        assert arguments.batch_size >= arguments.gpus, \
            'Error: Must have at least as many items per batch as GPUs for multi-GPU training. ' \
            'For model parallelism instead of data parallelism, modifications must be made to the code.'

    main(arguments)